"""simple docstring"""
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
def test_patch_submodule():
    import os as original_os
    from os import path as original_path
    from os import rename as original_rename
    from os.path import dirname as original_dirname
    from os.path import join as original_join

    assert _test_patching.os is original_os
    assert _test_patching.path is original_path
    assert _test_patching.join is original_join

    assert _test_patching.renamed_os is original_os
    assert _test_patching.renamed_path is original_path
    assert _test_patching.renamed_join is original_join

    mock = "__test_patch_submodule_mock__"
    with patch_submodule(_test_patching, "os.path.join", mock):
        # Every way to access os.path.join must be patched, and the rest must stay untouched

        # check os.path.join
        assert isinstance(_test_patching.os, _PatchedModuleObj)
        assert isinstance(_test_patching.os.path, _PatchedModuleObj)
        assert _test_patching.os.path.join is mock

        # check path.join
        assert isinstance(_test_patching.path, _PatchedModuleObj)
        assert _test_patching.path.join is mock

        # check join
        assert _test_patching.join is mock

        # check that the other attributes are untouched
        assert _test_patching.os.rename is original_rename
        assert _test_patching.path.dirname is original_dirname
        assert _test_patching.os.path.dirname is original_dirname

        # Even renamed modules or objects must be patched

        # check renamed_os.path.join
        assert isinstance(_test_patching.renamed_os, _PatchedModuleObj)
        assert isinstance(_test_patching.renamed_os.path, _PatchedModuleObj)
        assert _test_patching.renamed_os.path.join is mock

        # check renamed_path.join
        assert isinstance(_test_patching.renamed_path, _PatchedModuleObj)
        assert _test_patching.renamed_path.join is mock

        # check renamed_join
        assert _test_patching.renamed_join is mock

        # check that the other attributes are untouched
        assert _test_patching.renamed_os.rename is original_rename
        assert _test_patching.renamed_path.dirname is original_dirname
        assert _test_patching.renamed_os.path.dirname is original_dirname

    # check that everything is back to normal when the patch is over
    assert _test_patching.os is original_os
    assert _test_patching.path is original_path
    assert _test_patching.join is original_join

    assert _test_patching.renamed_os is original_os
    assert _test_patching.renamed_path is original_path
    assert _test_patching.renamed_join is original_join


def test_patch_submodule_builtin():
    assert _test_patching.open is open

    mock = "__test_patch_submodule_builtin_mock__"
    # _test_patching has "open" in its globals
    assert _test_patching.open is open
    with patch_submodule(_test_patching, "open", mock):
        assert _test_patching.open is mock

    # check that everything is back to normal when the patch is over
    assert _test_patching.open is open


def test_patch_submodule_missing():
    # pandas.read_csv is not present in _test_patching
    mock = "__test_patch_submodule_missing_mock__"
    with patch_submodule(_test_patching, "pandas.read_csv", mock):
        pass


def test_patch_submodule_missing_builtin():
    # builtins should always be mocked even if they're not in the globals,
    # in case they're loaded at one point
    mock = "__test_patch_submodule_missing_builtin_mock__"
    # _test_patching doesn't have "len" in its globals
    assert getattr(_test_patching, "len", None) is None
    with patch_submodule(_test_patching, "len", mock):
        assert _test_patching.len is mock
    assert _test_patching.len is len


def test_patch_submodule_start_and_stop():
    mock = "__test_patch_submodule_start_and_stop_mock__"
    patch = patch_submodule(_test_patching, "open", mock)
    assert _test_patching.open is open
    patch.start()
    assert _test_patching.open is mock
    patch.stop()
    assert _test_patching.open is open


def test_patch_submodule_successive():
    from os import rename as original_rename
    from os.path import dirname as original_dirname
    from os.path import join as original_join

    mock_join = "__test_patch_submodule_successive_join__"
    mock_dirname = "__test_patch_submodule_successive_dirname__"
    mock_rename = "__test_patch_submodule_successive_rename__"
    assert _test_patching.os.path.join is original_join
    assert _test_patching.os.path.dirname is original_dirname
    assert _test_patching.os.rename is original_rename

    with patch_submodule(_test_patching, "os.path.join", mock_join):
        with patch_submodule(_test_patching, "os.rename", mock_rename):
            with patch_submodule(_test_patching, "os.path.dirname", mock_dirname):
                assert _test_patching.os.path.join is mock_join
                assert _test_patching.os.path.dirname is mock_dirname
                assert _test_patching.os.rename is mock_rename

    # try another order
    with patch_submodule(_test_patching, "os.rename", mock_rename):
        with patch_submodule(_test_patching, "os.path.join", mock_join):
            with patch_submodule(_test_patching, "os.path.dirname", mock_dirname):
                assert _test_patching.os.path.join is mock_join
                assert _test_patching.os.path.dirname is mock_dirname
                assert _test_patching.os.rename is mock_rename

    assert _test_patching.os.path.join is original_join
    assert _test_patching.os.path.dirname is original_dirname
    assert _test_patching.os.rename is original_rename


def test_patch_submodule_doesnt_exist():
    mock = "__test_patch_submodule_doesnt_exist_mock__"
    with patch_submodule(_test_patching, "__module_that_doesn_exist__.__attribute_that_doesn_exist__", mock):
        pass
    with patch_submodule(_test_patching, "os.__attribute_that_doesn_exist__", mock):
        pass
"""simple docstring"""
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
logger = datasets.utils.logging.get_logger(__name__)


@dataclass
class ParquetConfig(datasets.BuilderConfig):
    """BuilderConfig for Parquet."""

    batch_size: int = 10_000
    columns: Optional[List[str]] = None
    features: Optional[datasets.Features] = None


class Parquet(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = ParquetConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in data_files"""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            # Infer features if they are stored in the arrow schema
            if self.info.features is None:
                for file in itertools.chain.from_iterable(files):
                    with open(file, "rb") as f:
                        self.info.features = datasets.Features.from_arrow_schema(pq.read_schema(f))
                    break
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.info.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.info.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        schema = self.info.features.arrow_schema if self.info.features is not None else None
        if self.info.features is not None and self.config.columns is not None:
            if sorted(field.name for field in schema) != sorted(self.config.columns):
                raise ValueError(
                    f"Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'"
                )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                parquet_file = pq.ParquetFile(f)
                try:
                    for batch_idx, record_batch in enumerate(
                        parquet_file.iter_batches(batch_size=self.config.batch_size, columns=self.config.columns)
                    ):
                        pa_table = pa.Table.from_batches([record_batch])
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield f"{file_idx}_{batch_idx}", self._cast_table(pa_table)
                except ValueError as e:
                    logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                    raise
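
# Illustrative usage sketch (not part of the original module): the builder above is what
# backs `load_dataset("parquet", ...)`. The file path below is a hypothetical placeholder.
#
# from datasets import load_dataset
#
# dataset = load_dataset("parquet", data_files={"train": "data/train.parquet"}, split="train")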
"""simple docstring"""
from typing import Union
import fire
import torch
from tqdm import tqdm
def convert(src_path: str, map_location: str = "cpu", save_path: Union[str, None] = None) -> None:
    """Convert a saved state dict (e.g. pytorch_model.bin) to torch.float16, in place or to save_path."""
    state_dict = torch.load(src_path, map_location=map_location)
    for k, v in tqdm(state_dict.items()):
        if not isinstance(v, torch.Tensor):
            raise TypeError("FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin")
        state_dict[k] = v.half()
    if save_path is None:  # overwrite src_path
        save_path = src_path
    torch.save(state_dict, save_path)


if __name__ == "__main__":
    fire.Fire(convert)
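
# Example invocation (hypothetical script name and paths); `fire` exposes `convert` as a CLI:
#
#   python convert_model_to_fp16.py pytorch_model.bin --save_path pytorch_model.fp16.bin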
from __future__ import annotations
import numpy as np
def lower_upper_decomposition(table: np.ndarray) -> tuple[np.ndarray, np.ndarray]:
    """Doolittle LU decomposition of a square matrix (no pivoting)."""
    rows, columns = np.shape(table)
    if rows != columns:
        msg = (
            "'table' has to be of square shaped array but got a "
            f"{rows}x{columns} array:\n{table}"
        )
        raise ValueError(msg)

    lower = np.zeros((rows, columns))
    upper = np.zeros((rows, columns))
    for i in range(columns):
        for j in range(i):
            total = sum(lower[i][k] * upper[k][j] for k in range(j))
            if upper[j][j] == 0:
                raise ArithmeticError("No LU decomposition exists")
            lower[i][j] = (table[i][j] - total) / upper[j][j]
        lower[i][i] = 1
        for j in range(i, columns):
            total = sum(lower[i][k] * upper[k][j] for k in range(i))
            upper[i][j] = table[i][j] - total
    return lower, upper
if __name__ == "__main__":
import doctest
doctest.testmod()
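
# Illustrative example (not from the original file): decompose a small matrix whose
# leading principal minors are nonzero, so a Doolittle decomposition exists.
#
# example = np.array([[2.0, -2.0, 1.0], [0.0, 1.0, 2.0], [5.0, 3.0, 1.0]])
# lower, upper = lower_upper_decomposition(example)
# assert np.allclose(lower @ upper, example)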
from dataclasses import dataclass
from typing import Tuple

import numpy as np
import torch


@dataclass
class DifferentiableProjectiveCamera:
    """
    Implements a batch, differentiable, standard pinhole camera
    """

    origin: torch.Tensor  # [batch_size x 3]
    x: torch.Tensor  # [batch_size x 3]
    y: torch.Tensor  # [batch_size x 3]
    z: torch.Tensor  # [batch_size x 3]
    width: int
    height: int
    x_fov: float
    y_fov: float
    shape: Tuple[int]

    def __post_init__(self):
        assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
        assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
        assert len(self.x.shape) == len(self.y.shape) == len(self.z.shape) == len(self.origin.shape) == 2

    def resolution(self):
        return torch.from_numpy(np.array([self.width, self.height], dtype=np.float32))

    def fov(self):
        return torch.from_numpy(np.array([self.x_fov, self.y_fov], dtype=np.float32))

    def get_image_coords(self) -> torch.Tensor:
        """
        :return: coords of shape (width * height, 2)
        """
        pixel_indices = torch.arange(self.height * self.width)
        coords = torch.stack(
            [
                pixel_indices % self.width,
                torch.div(pixel_indices, self.width, rounding_mode="trunc"),
            ],
            axis=1,
        )
        return coords

    @property
    def camera_rays(self):
        batch_size, *inner_shape = self.shape
        inner_batch_size = int(np.prod(inner_shape))

        coords = self.get_image_coords()
        coords = torch.broadcast_to(coords.unsqueeze(0), [batch_size * inner_batch_size, *coords.shape])
        rays = self.get_camera_rays(coords)

        rays = rays.view(batch_size, inner_batch_size * self.height * self.width, 2, 3)

        return rays

    def get_camera_rays(self, coords: torch.Tensor) -> torch.Tensor:
        batch_size, *shape, n_coords = coords.shape
        assert n_coords == 2
        assert batch_size == self.origin.shape[0]

        flat = coords.view(batch_size, -1, 2)

        res = self.resolution()
        fov = self.fov()

        fracs = (flat.float() / (res - 1)) * 2 - 1
        fracs = fracs * torch.tan(fov / 2)

        fracs = fracs.view(batch_size, -1, 2)
        directions = (
            self.z.view(batch_size, 1, 3)
            + self.x.view(batch_size, 1, 3) * fracs[:, :, :1]
            + self.y.view(batch_size, 1, 3) * fracs[:, :, 1:]
        )
        directions = directions / directions.norm(dim=-1, keepdim=True)
        rays = torch.stack(
            [
                torch.broadcast_to(self.origin.view(batch_size, 1, 3), [batch_size, directions.shape[1], 3]),
                directions,
            ],
            dim=2,
        )
        return rays.view(batch_size, *shape, 2, 3)

    def resize_image(self, width: int, height: int) -> "DifferentiableProjectiveCamera":
        """
        Creates a new camera for the resized view, assuming the aspect ratio does not change.
        """
        assert width * self.height == height * self.width, "The aspect ratio should not change."
        return DifferentiableProjectiveCamera(
            origin=self.origin,
            x=self.x,
            y=self.y,
            z=self.z,
            width=width,
            height=height,
            x_fov=self.x_fov,
            y_fov=self.y_fov,
        )


def create_pan_cameras(size: int) -> DifferentiableProjectiveCamera:
    origins = []
    xs = []
    ys = []
    zs = []
    for theta in np.linspace(0, 2 * np.pi, num=20):
        z = np.array([np.sin(theta), np.cos(theta), -0.5])
        z /= np.sqrt(np.sum(z**2))
        origin = -z * 4
        x = np.array([np.cos(theta), -np.sin(theta), 0.0])
        y = np.cross(z, x)
        origins.append(origin)
        xs.append(x)
        ys.append(y)
        zs.append(z)
    return DifferentiableProjectiveCamera(
        origin=torch.from_numpy(np.stack(origins, axis=0)).float(),
        x=torch.from_numpy(np.stack(xs, axis=0)).float(),
        y=torch.from_numpy(np.stack(ys, axis=0)).float(),
        z=torch.from_numpy(np.stack(zs, axis=0)).float(),
        width=size,
        height=size,
        x_fov=0.7,
        y_fov=0.7,
        shape=(1, len(xs)),
    )
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available
_import_structure = {
'configuration_audio_spectrogram_transformer': [
'AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'ASTConfig',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_audio_spectrogram_transformer"] = [
'AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'ASTForAudioClassification',
'ASTModel',
'ASTPreTrainedModel',
]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_audio_spectrogram_transformer"] = ["ASTFeatureExtractor"]
if TYPE_CHECKING:
from .configuration_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
ASTConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ASTForAudioClassification,
ASTModel,
ASTPreTrainedModel,
)
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
@require_torch
class ZeroShotObjectDetectionPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        object_detector = pipeline(
            "zero-shot-object-detection", model="hf-internal-testing/tiny-random-owlvit-object-detection"
        )

        examples = [
{
"image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
"candidate_labels": ["cat", "remote", "couch"],
}
]
return object_detector, examples
    def run_pipeline_test(self, object_detector, examples):
        outputs = object_detector(examples[0], threshold=0.0)

        n = len(outputs)
        self.assertGreater(n, 0)
        self.assertEqual(
            outputs,
            [
                {
                    "score": ANY(float),
                    "label": ANY(str),
                    "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                }
                for i in range(n)
            ],
        )
@require_tf
@unittest.skip("Zero Shot Object Detection not implemented in TF" )
    def test_small_model_tf(self):
pass
    @require_torch
    def test_small_model_pt(self):
        object_detector = pipeline(
            "zero-shot-object-detection", model="hf-internal-testing/tiny-random-owlvit-object-detection"
        )

        outputs = object_detector(
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            candidate_labels=["cat", "remote", "couch"],
            threshold=0.64,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
{"score": 0.7_2_3_5, "label": "cat", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
{"score": 0.7_2_1_8, "label": "remote", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
{"score": 0.7_1_8_4, "label": "couch", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
{"score": 0.6_7_4_8, "label": "remote", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
{"score": 0.6_6_5_6, "label": "cat", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
{"score": 0.6_6_1_4, "label": "couch", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
{"score": 0.6_4_5_6, "label": "remote", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
{"score": 0.6_4_2, "label": "remote", "box": {"xmin": 67, "ymin": 274, "xmax": 93, "ymax": 297}},
{"score": 0.6_4_1_9, "label": "cat", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
] , )
        outputs = object_detector(
[
{
"image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
"candidate_labels": ["cat", "remote", "couch"],
}
] , threshold=0.6_4 , )
self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
[
{"score": 0.7_2_3_5, "label": "cat", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
{"score": 0.7_2_1_8, "label": "remote", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
{"score": 0.7_1_8_4, "label": "couch", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
{"score": 0.6_7_4_8, "label": "remote", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
{"score": 0.6_6_5_6, "label": "cat", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
{"score": 0.6_6_1_4, "label": "couch", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
{"score": 0.6_4_5_6, "label": "remote", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
{"score": 0.6_4_2, "label": "remote", "box": {"xmin": 67, "ymin": 274, "xmax": 93, "ymax": 297}},
{"score": 0.6_4_1_9, "label": "cat", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
]
] , )
@require_torch
@slow
    def test_large_model_pt(self):
        object_detector = pipeline("zero-shot-object-detection")

        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            candidate_labels=["cat", "remote", "couch"],
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
{"score": 0.2_8_6_8, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
{"score": 0.2_7_7, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
{"score": 0.2_5_3_7, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
{"score": 0.1_4_7_4, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}},
{"score": 0.1_2_0_8, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}},
] , )
        outputs = object_detector(
[
{
"image": "http://images.cocodataset.org/val2017/000000039769.jpg",
"candidate_labels": ["cat", "remote", "couch"],
},
{
"image": "http://images.cocodataset.org/val2017/000000039769.jpg",
"candidate_labels": ["cat", "remote", "couch"],
},
] , )
self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
[
{"score": 0.2_8_6_8, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
{"score": 0.2_7_7, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
{"score": 0.2_5_3_7, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
{"score": 0.1_4_7_4, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}},
{"score": 0.1_2_0_8, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}},
],
[
{"score": 0.2_8_6_8, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
{"score": 0.2_7_7, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
{"score": 0.2_5_3_7, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
{"score": 0.1_4_7_4, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}},
{"score": 0.1_2_0_8, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}},
],
] , )
@require_tf
@unittest.skip("Zero Shot Object Detection not implemented in TF" )
    def test_large_model_tf(self):
pass
@require_torch
@slow
    def test_threshold(self):
        threshold = 0.2
        object_detector = pipeline("zero-shot-object-detection")

        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            candidate_labels=["cat", "remote", "couch"],
            threshold=threshold,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
{"score": 0.2_8_6_8, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
{"score": 0.2_7_7, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
{"score": 0.2_5_3_7, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
] , )
@require_torch
@slow
    def test_top_k(self):
        top_k = 2
        object_detector = pipeline("zero-shot-object-detection")

        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            candidate_labels=["cat", "remote", "couch"],
            top_k=top_k,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
{"score": 0.2_8_6_8, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
{"score": 0.2_7_7, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
] , )
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def is_compiled_module(module):
    """
    Check whether the module was compiled with torch.compile()
    """
    if is_torch_version("<", "2.0.0") or not hasattr(torch, "_dynamo"):
        return False
    return isinstance(module, torch._dynamo.eval_frame.OptimizedModule)


def extract_model_from_parallel(model, keep_fp32_wrapper: bool = True):
    """
    Extract a model from its distributed containers.
    """
    options = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)

    is_compiled = is_compiled_module(model)
    if is_compiled:
        compiled_model = model
        model = model._orig_mod

    if is_deepspeed_available():
        options += (DeepSpeedEngine,)

    while isinstance(model, options):
        model = model.module

    if not keep_fp32_wrapper:
        forward = getattr(model, "forward")
        original_forward = model.__dict__.pop("_original_forward", None)
        if original_forward is not None:
            while hasattr(forward, "__wrapped__"):
                forward = forward.__wrapped__
                if forward == original_forward:
                    break
            model.forward = forward
        if getattr(model, "_converted_to_transformer_engine", False):
            convert_model(model, to_transformer_engine=False)

    if is_compiled:
        compiled_model._orig_mod = model
        model = compiled_model

    return model


def wait_for_everyone():
    """
    Introduces a blocking point in the script, making sure all processes have reached this point before continuing.
    """
    PartialState().wait_for_everyone()


def save(obj, f):
    """
    Save the data to disk. Use in place of `torch.save()`.
    """
    if PartialState().distributed_type == DistributedType.TPU:
        xm.save(obj, f)
    elif PartialState().local_process_index == 0:
        torch.save(obj, f)


@contextmanager
def patch_environment(**kwargs):
    """
    A context manager that adds each keyword argument to `os.environ` (upper-cased)
    and removes it again when exiting.
    """
    for key, value in kwargs.items():
        os.environ[key.upper()] = str(value)

    yield

    for key in kwargs:
        if key.upper() in os.environ:
            del os.environ[key.upper()]


def get_pretty_name(obj):
    """
    Gets a pretty name from `obj`.
    """
    if not hasattr(obj, "__qualname__") and not hasattr(obj, "__name__"):
        obj = getattr(obj, "__class__", obj)
    if hasattr(obj, "__qualname__"):
        return obj.__qualname__
    if hasattr(obj, "__name__"):
        return obj.__name__
    return str(obj)


def merge_dicts(source, destination):
    """
    Recursively merges `source` into `destination`.
    """
    for key, value in source.items():
        if isinstance(value, dict):
            node = destination.setdefault(key, {})
            merge_dicts(value, node)
        else:
            destination[key] = value

    return destination


def is_port_in_use(port: int = None) -> bool:
    """
    Checks if a port is in use on `localhost`.
    """
    if port is None:
        port = 29500
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        return s.connect_ex(("localhost", port)) == 0
def different_signs(num1: int, num2: int) -> bool:
    """
    Return True if the two integers have opposite signs (checked via the XOR sign bit).
    """
    return num1 ^ num2 < 0
if __name__ == "__main__":
import doctest
doctest.testmod()
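
# Illustrative checks (not from the original file): the XOR of two integers is
# negative exactly when their sign bits differ.
#
# assert different_signs(1, -1) is True
# assert different_signs(-2, -3) is False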
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def pi_estimator(iterations: int):
    """
    An implementation of the Monte Carlo method used to find pi:
    draw points uniformly from the unit square and count how many
    land inside the circle of radius 1.
    """

    def is_in_circle(x: float, y: float) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2))
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1

    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0))) for _ in range(iterations)
    )
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(f"The estimated value of pi is {pi_estimate}")
    print(f"The numpy value of pi is {pi}")
    print(f"The total error is {abs(pi - pi_estimate)}")


def area_under_curve_estimator(
    iterations: int,
    function_to_integrate: Callable[[float], float],
    min_value: float = 0.0,
    max_value: float = 1.0,
) -> float:
    """
    Monte Carlo estimate of the integral of `function_to_integrate` over
    [min_value, max_value].
    """
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)
    ) * (max_value - min_value)


def area_under_line_estimator_check(iterations: int, min_value: float = 0.0, max_value: float = 1.0) -> None:
    """
    Checks the estimator against the known area under y = x.
    """

    def identity_function(x: float) -> float:
        return x

    estimated_value = area_under_curve_estimator(iterations, identity_function, min_value, max_value)
    expected_value = (max_value * max_value - min_value * min_value) / 2

    print("******************")
    print(f"Estimating area under y=x where x varies from {min_value} to {max_value}")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {expected_value}")
    print(f"Total error is {abs(estimated_value - expected_value)}")
    print("******************")


def pi_estimator_using_area_under_curve(iterations: int) -> None:
    """
    The area under the curve y = sqrt(4 - x^2) for x in [0, 2] equals pi.
    """

    def function_to_integrate(x: float) -> float:
        return sqrt(4.0 - x * x)

    estimated_value = area_under_curve_estimator(iterations, function_to_integrate, 0.0, 2.0)

    print("******************")
    print("Estimating pi using area_under_curve_estimator")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {pi}")
    print(f"Total error is {abs(estimated_value - pi)}")
    print("******************")
if __name__ == "__main__":
import doctest
doctest.testmod()
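
# Illustrative demo calls (not from the original file); larger iteration counts
# tighten the estimates at the cost of runtime.
#
# pi_estimator(100_000)
# area_under_line_estimator_check(100_000)
# pi_estimator_using_area_under_curve(100_000)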
import bz2
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
logger = get_logger(__name__)
class ExtractManager:
    def __init__(self, cache_dir: Optional[str] = None):
        self.extract_dir = (
            os.path.join(cache_dir, config.EXTRACTED_DATASETS_DIR) if cache_dir else config.EXTRACTED_DATASETS_PATH
        )
        self.extractor = Extractor

    def _get_output_path(self, path: str) -> str:
        from .file_utils import hash_url_to_filename

        # Path where we extract compressed archives
        # We extract in the cache dir, and get the extracted path name by hashing the original path
        abs_path = os.path.abspath(path)
        return os.path.join(self.extract_dir, hash_url_to_filename(abs_path))

    def _do_extract(self, output_path: str, force_extract: bool) -> bool:
        return force_extract or (
            not os.path.isfile(output_path) and not (os.path.isdir(output_path) and os.listdir(output_path))
        )

    def extract(self, input_path: str, force_extract: bool = False) -> str:
        extractor_format = self.extractor.infer_extractor_format(input_path)
        if not extractor_format:
            return input_path
        output_path = self._get_output_path(input_path)
        if self._do_extract(output_path, force_extract):
            self.extractor.extract(input_path, output_path, extractor_format)
        return output_path
class BaseExtractor(ABC):
    @classmethod
    @abstractmethod
    def is_extractable(cls, path: Union[Path, str], **kwargs) -> bool:
        ...

    @staticmethod
    @abstractmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        ...
class MagicNumberBaseExtractor(BaseExtractor, ABC):
    magic_numbers: List[bytes] = []

    @staticmethod
    def read_magic_number(path: Union[Path, str], magic_number_length: int):
        with open(path, "rb") as f:
            return f.read(magic_number_length)

    @classmethod
    def is_extractable(cls, path: Union[Path, str], magic_number: bytes = b"") -> bool:
        if not magic_number:
            magic_number_length = max(len(cls_magic_number) for cls_magic_number in cls.magic_numbers)
            try:
                magic_number = cls.read_magic_number(path, magic_number_length)
            except OSError:
                return False
        return any(magic_number.startswith(cls_magic_number) for cls_magic_number in cls.magic_numbers)
class TarExtractor(BaseExtractor):
    @classmethod
    def is_extractable(cls, path: Union[Path, str], **kwargs) -> bool:
        return tarfile.is_tarfile(path)

    @staticmethod
    def safemembers(members, output_path):
        """
        Fix for CVE-2007-4559: filter out members whose paths or links would
        escape the output directory.
        """

        def resolved(path: str) -> str:
            return os.path.realpath(os.path.abspath(path))

        def badpath(path: str, base: str) -> bool:
            # joinpath will ignore base if path is absolute
            return not resolved(os.path.join(base, path)).startswith(base)

        def badlink(info, base: str) -> bool:
            # Links are interpreted relative to the directory containing the link
            tip = resolved(os.path.join(base, os.path.dirname(info.name)))
            return badpath(info.linkname, base=tip)

        base = resolved(output_path)

        for finfo in members:
            if badpath(finfo.name, base):
                logger.error(f"Extraction of {finfo.name} is blocked (illegal path)")
            elif finfo.issym() and badlink(finfo, base):
                logger.error(f"Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}")
            elif finfo.islnk() and badlink(finfo, base):
                logger.error(f"Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}")
            else:
                yield finfo

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        os.makedirs(output_path, exist_ok=True)
        tar_file = tarfile.open(input_path)
        tar_file.extractall(output_path, members=TarExtractor.safemembers(tar_file, output_path))
        tar_file.close()
class GzipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x1F\x8B"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        with gzip.open(input_path, "rb") as gzip_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(gzip_file, extracted_file)
class ZipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [
        b"PK\x03\x04",
        b"PK\x05\x06",  # empty archive
        b"PK\x07\x08",  # spanned archive
    ]

    @classmethod
    def is_extractable(cls, path: Union[Path, str], magic_number: bytes = b"") -> bool:
        if super().is_extractable(path, magic_number=magic_number):
return True
try:
# Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
# From: https://github.com/python/cpython/pull/5053
from zipfile import (
_CD_SIGNATURE,
_ECD_DISK_NUMBER,
_ECD_DISK_START,
_ECD_ENTRIES_TOTAL,
_ECD_OFFSET,
_ECD_SIZE,
_EndRecData,
sizeCentralDir,
stringCentralDir,
structCentralDir,
)
            with open(path, "rb") as fp:
                endrec = _EndRecData(fp)
                if endrec:
                    if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
                        return True  # Empty zipfiles are still zipfiles
                    elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
                        fp.seek(endrec[_ECD_OFFSET])  # Central directory is on the same disk
                        if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
                            data = fp.read(sizeCentralDir)  # CD is where we expect it to be
                            if len(data) == sizeCentralDir:
                                centdir = struct.unpack(structCentralDir, data)  # CD is the right size
                                if centdir[_CD_SIGNATURE] == stringCentralDir:
                                    return True  # First central directory entry has correct magic number
            return False
        except Exception:  # catch all errors in case future python versions change the zipfile internals
            return False
    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        os.makedirs(output_path, exist_ok=True)
        with zipfile.ZipFile(input_path, "r") as zip_file:
            zip_file.extractall(output_path)
            zip_file.close()
class XzExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\xFD\x37\x7A\x58\x5A\x00"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        with lzma.open(input_path) as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)
class RarExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"Rar!\x1a\x07\x00", b"Rar!\x1a\x07\x01\x00"]  # RAR_ID, RAR5_ID

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.RARFILE_AVAILABLE:
            raise ImportError("Please pip install rarfile")
        import rarfile

        os.makedirs(output_path, exist_ok=True)
        rf = rarfile.RarFile(input_path)
        rf.extractall(output_path)
        rf.close()
class ZstdExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x28\xb5\x2F\xFD"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.ZSTANDARD_AVAILABLE:
            raise ImportError("Please pip install zstandard")
        import zstandard as zstd

        dctx = zstd.ZstdDecompressor()
        with open(input_path, "rb") as ifh, open(output_path, "wb") as ofh:
            dctx.copy_stream(ifh, ofh)
class Bzip2Extractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x42\x5A\x68"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        with bz2.open(input_path, "rb") as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)
class SevenZipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x37\x7A\xBC\xAF\x27\x1C"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.PY7ZR_AVAILABLE:
            raise ImportError("Please pip install py7zr")
        import py7zr

        os.makedirs(output_path, exist_ok=True)
        with py7zr.SevenZipFile(input_path, "r") as archive:
            archive.extractall(output_path)
class Lz4Extractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x04\x22\x4D\x18"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.LZ4_AVAILABLE:
            raise ImportError("Please pip install lz4")
        import lz4.frame

        with lz4.frame.open(input_path, "rb") as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)
class Extractor:
    extractors: Dict[str, Type[BaseExtractor]] = {
        "tar": TarExtractor,
        "gzip": GzipExtractor,
        "zip": ZipExtractor,
        "xz": XzExtractor,
        "rar": RarExtractor,
        "zstd": ZstdExtractor,
        "bz2": Bzip2Extractor,
        "7z": SevenZipExtractor,  # <Added version="2.4.0"/>
        "lz4": Lz4Extractor,  # <Added version="2.4.0"/>
    }

    @classmethod
    def _get_magic_number_max_length(cls):
        return max(
            len(extractor_magic_number)
            for extractor in cls.extractors.values()
            if issubclass(extractor, MagicNumberBaseExtractor)
            for extractor_magic_number in extractor.magic_numbers
        )
    @staticmethod
    def _read_magic_number(path: Union[Path, str], magic_number_length: int):
        try:
            return MagicNumberBaseExtractor.read_magic_number(path, magic_number_length=magic_number_length)
        except OSError:
            return b""
    @classmethod
    def is_extractable(cls, path: Union[Path, str], return_extractor: bool = False) -> bool:
        warnings.warn(
            "Method 'is_extractable' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
            "Use 'infer_extractor_format' instead.",
            category=FutureWarning,
        )
        extractor_format = cls.infer_extractor_format(path)
        if extractor_format:
            return True if not return_extractor else (True, cls.extractors[extractor_format])
        return False if not return_extractor else (False, None)
    @classmethod
    def infer_extractor_format(cls, path: Union[Path, str]) -> str:  # <Added version="2.4.0"/>
        magic_number_max_length = cls._get_magic_number_max_length()
        magic_number = cls._read_magic_number(path, magic_number_max_length)
        for extractor_format, extractor in cls.extractors.items():
            if extractor.is_extractable(path, magic_number=magic_number):
                return extractor_format
    @classmethod
    def extract(
        cls,
        input_path: Union[Path, str],
        output_path: Union[Path, str],
        extractor_format: Optional[str] = None,  # <Added version="2.4.0"/>
        extractor: Optional[BaseExtractor] = "deprecated",
    ) -> None:
        os.makedirs(os.path.dirname(output_path), exist_ok=True)
        # Prevent parallel extractions
        lock_path = str(Path(output_path).with_suffix(".lock"))
        with FileLock(lock_path):
            shutil.rmtree(output_path, ignore_errors=True)
            if extractor_format or extractor != "deprecated":
                if extractor != "deprecated" or not isinstance(extractor_format, str):  # passed as positional arg
                    warnings.warn(
                        "Parameter 'extractor' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
                        "Use 'extractor_format' instead.",
                        category=FutureWarning,
                    )
                    extractor = extractor if extractor != "deprecated" else extractor_format
                else:
                    extractor = cls.extractors[extractor_format]
                return extractor.extract(input_path, output_path)
            else:
                warnings.warn(
                    "Parameter 'extractor_format' was made required in version 2.4.0 and not passing it will raise an "
                    "exception in 3.0.0.",
                    category=FutureWarning,
                )
                for extractor in cls.extractors.values():
                    if extractor.is_extractable(input_path):
                        return extractor.extract(input_path, output_path)
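
# Illustrative usage sketch (not part of the original module); the archive path
# below is a hypothetical placeholder.
#
# fmt = Extractor.infer_extractor_format("data/archive.tar.gz")  # e.g. "gzip"
# if fmt:
#     Extractor.extract("data/archive.tar.gz", "extracted/archive", extractor_format=fmt)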
import os
from typing import BinaryIO, Optional, Union
import numpy as np
import pyarrow.parquet as pq
from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
def get_writer_batch_size(features: Features) -> Optional[int]:
    # Lower the writer batch size (i.e. the parquet row group size) for media-heavy
    # features, so that random access to a single row stays cheap.
    batch_size = np.inf

    def set_batch_size(feature: FeatureType) -> None:
        nonlocal batch_size
        if isinstance(feature, Image):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS)
        elif isinstance(feature, Audio):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS)
        elif isinstance(feature, Value) and feature.dtype == "binary":
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS)

    _visit(features, set_batch_size)

    return None if batch_size is np.inf else batch_size
class ParquetDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths, split=split, features=features, cache_dir=cache_dir,
            keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs,
        )
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        hash = _PACKAGED_DATASETS_MODULES["parquet"][1]
        self.builder = Parquet(
            cache_dir=cache_dir, data_files=path_or_paths, features=features, hash=hash, **kwargs,
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config, download_mode=download_mode,
                verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
class ParquetDatasetWriter:
    def __init__(
        self,
        dataset: Dataset,
        path_or_buf: Union[PathLike, BinaryIO],
        batch_size: Optional[int] = None,
        **parquet_writer_kwargs,
    ):
        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size or get_writer_batch_size(dataset.features)
        self.parquet_writer_kwargs = parquet_writer_kwargs

    def write(self) -> int:
        batch_size = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE

        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with open(self.path_or_buf, "wb+") as buffer:
                written = self._write(file_obj=buffer, batch_size=batch_size, **self.parquet_writer_kwargs)
        else:
            written = self._write(file_obj=self.path_or_buf, batch_size=batch_size, **self.parquet_writer_kwargs)
        return written

    def _write(self, file_obj: BinaryIO, batch_size: int, **parquet_writer_kwargs) -> int:
        written = 0
        _ = parquet_writer_kwargs.pop("path_or_buf", None)
        schema = self.dataset.features.arrow_schema

        writer = pq.ParquetWriter(file_obj, schema=schema, **parquet_writer_kwargs)

        for offset in logging.tqdm(
            range(0, len(self.dataset), batch_size),
            unit="ba",
            disable=not logging.is_progress_bar_enabled(),
            desc="Creating parquet from Arrow format",
        ):
            batch = query_table(
                table=self.dataset._data,
                key=slice(offset, offset + batch_size),
                indices=self.dataset._indices if self.dataset._indices is not None else None,
            )
            writer.write_table(batch)
            written += batch.nbytes
        writer.close()
        return written
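
# Illustrative usage sketch (not part of the original module); paths are hypothetical.
#
# from datasets import Dataset
#
# ds = Dataset.from_dict({"text": ["a", "b"]})
# ParquetDatasetWriter(ds, "out.parquet").write()
# round_tripped = ParquetDatasetReader("out.parquet").read()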
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class LevitImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"shortest_edge": 18}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
    def prepare_image_processor_dict(self):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"do_center_crop": self.do_center_crop,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class LevitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LevitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = LevitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
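
# Illustrative sketch of what these tests exercise (not part of the original file):
#
# from transformers import LevitImageProcessor
#
# processor = LevitImageProcessor(size={"shortest_edge": 18}, crop_size={"height": 18, "width": 18})
# pixel_values = processor(image_inputs, return_tensors="pt").pixel_values  # `image_inputs` as above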
class DisjointSet:
    def __init__(self, set_counts: list) -> None:
        """
        Initialize with a list of the number of items in each set
        and with rank = 1 for each set
        """
        self.set_counts = set_counts
        self.max_set = max(set_counts)
        num_sets = len(set_counts)
        self.ranks = [1] * num_sets
        self.parents = list(range(num_sets))

    def merge(self, src: int, dst: int) -> bool:
        """
        Merge two sets together using the union-by-rank heuristic.
        Return True if the merge happened (i.e. the sets were distinct).
        """
        src_parent = self.get_parent(src)
        dst_parent = self.get_parent(dst)

        if src_parent == dst_parent:
            return False

        if self.ranks[dst_parent] >= self.ranks[src_parent]:
            self.set_counts[dst_parent] += self.set_counts[src_parent]
            self.set_counts[src_parent] = 0
            self.parents[src_parent] = dst_parent
            if self.ranks[dst_parent] == self.ranks[src_parent]:
                self.ranks[dst_parent] += 1
            joined_set_size = self.set_counts[dst_parent]
        else:
            self.set_counts[src_parent] += self.set_counts[dst_parent]
            self.set_counts[dst_parent] = 0
            self.parents[dst_parent] = src_parent
            joined_set_size = self.set_counts[src_parent]

        self.max_set = max(self.max_set, joined_set_size)
        return True

    def get_parent(self, disj_set: int) -> int:
        """
        Find the parent of a given set, compressing the path along the way.
        """
        if self.parents[disj_set] == disj_set:
            return disj_set
        self.parents[disj_set] = self.get_parent(self.parents[disj_set])
        return self.parents[disj_set]
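
# Illustrative usage (not from the original file): three singleton sets of sizes
# 1, 2 and 3; merging the first two yields a largest set of size 3.
#
# ds = DisjointSet([1, 2, 3])
# ds.merge(0, 1)
# assert ds.max_set == 3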
"""simple docstring"""
from __future__ import annotations
def depth_first_search(
    possible_board: list[int],
    diagonal_right_collisions: list[int],
    diagonal_left_collisions: list[int],
    boards: list[list[str]],
    n: int,
) -> None:
    # Get next row in the current board (possible_board) to fill it with a queen
    row = len(possible_board)
# If row is equal to the size of the board it means there are a queen in each row in
# the current board (possible_board)
if row == n:
# We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
# this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
boards.append(['. ' * i + 'Q ' + '. ' * (n - 1 - i) for i in possible_board] )
return
# We iterate each column in the row to find all possible results in each row
    for col in range(n):
# We apply that we learned previously. First we check that in the current board
# (possible_board) there are not other same value because if there is it means
# that there are a collision in vertical. Then we apply the two formulas we
# learned before:
#
# 45º: y - x = b or 45: row - col = b
# 135º: y + x = b or row + col = b.
#
# And we verify if the results of this two formulas not exist in their variables
# respectively. (diagonal_right_collisions, diagonal_left_collisions)
#
# If any or these are True it means there is a collision so we continue to the
# next value in the for loop.
if (
col in possible_board
or row - col in diagonal_right_collisions
or row + col in diagonal_left_collisions
):
continue
# If it is False we call dfs function again and we update the inputs
        depth_first_search(
            [*possible_board, col],
            [*diagonal_right_collisions, row - col],
            [*diagonal_left_collisions, row + col],
            boards,
            n,
        )
def n_queens_solution(n: int) -> None:
    boards: list[list[str]] = []
    depth_first_search([], [], [], boards, n)

    # Print all the boards
    for board in boards:
        for column in board:
            print(column)
        print("")

    print(len(boards), "solutions were found.")
if __name__ == "__main__":
import doctest
doctest.testmod()
n_queens_solution(4)
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SEW_D_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'asapp/sew-d-tiny-100k': 'https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json',
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class SEWDConfig(PretrainedConfig):
    model_type = "sew-d"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        squeeze_factor=2,
        max_position_embeddings=512,
        position_buckets=256,
        share_att_key=True,
        relative_attention=True,
        pos_att_type=("p2c", "c2p"),
        norm_rel_ebd="layer_norm",
        hidden_act="gelu_python",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-7,
        feature_layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512),
        conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1),
        conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.max_position_embeddings = max_position_embeddings
        self.position_buckets = position_buckets
        self.share_att_key = share_att_key
        self.relative_attention = relative_attention
        self.norm_rel_ebd = norm_rel_ebd
        self.pos_att_type = list(pos_att_type)
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layer_norm_eps = layer_norm_eps
        self.feature_layer_norm_eps = feature_layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. "
                "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, "
                f"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride) "
                f"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
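# A minimal sanity-check sketch (added for illustration; assumes the surrounding
# `transformers` package is importable). With the defaults above, the conv
# strides multiply out to 5 * 2**6 = 320, i.e. one output frame per 320 audio
# samples.
if __name__ == "__main__":
    _config = SEWDConfig()
    assert _config.inputs_to_logits_ratio == 320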
| 205 | 1 |
"""simple docstring"""
from __future__ import annotations
class BoyerMooreSearch:
    def __init__(self, text: str, pattern: str):
        self.text, self.pattern = text, pattern
        self.textLen, self.patLen = len(text), len(pattern)

    def match_in_pattern(self, char: str) -> int:
        for i in range(self.patLen - 1, -1, -1):
            if char == self.pattern[i]:
                return i
        return -1

    def mismatch_in_text(self, current_pos: int) -> int:
        for i in range(self.patLen - 1, -1, -1):
            if self.pattern[i] != self.text[current_pos + i]:
                return current_pos + i
        return -1

    def bad_character_heuristic(self) -> list[int]:
        positions = []
        for i in range(self.textLen - self.patLen + 1):
            mismatch_index = self.mismatch_in_text(i)
            if mismatch_index == -1:
                positions.append(i)
            else:
                match_index = self.match_in_pattern(self.text[mismatch_index])
                i = (
                    mismatch_index - match_index
                )  # shifting index lgtm [py/multiple-definition]
        return positions


text = "ABAABA"
pattern = "AB"
bms = BoyerMooreSearch(text, pattern)
positions = bms.bad_character_heuristic()
if len(positions) == 0:
print('''No match found''')
else:
print('''Pattern found in following positions: ''')
print(positions)
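# Expected output of the demo above: pattern "AB" occurs at indices 0 and 3 of
# "ABAABA", so this prints "Pattern found in following positions:" then [0, 3].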
| 72 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/vocab.json''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/vocab.json''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json''',
},
'''merges_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/merges.txt''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/merges.txt''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/bart-base''': 1024,
'''facebook/bart-large''': 1024,
'''facebook/bart-large-mnli''': 1024,
'''facebook/bart-large-cnn''': 1024,
'''facebook/bart-large-xsum''': 1024,
'''yjernite/bart_eli5''': 1024,
}
class BartTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BartTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False

            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True

            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)
    @property
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Mask token behaves like a normal word, i.e. include the space before it
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output

        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
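# A minimal usage sketch (added for illustration; downloads weights from the
# Hugging Face Hub, so it needs network access):
if __name__ == "__main__":
    tokenizer = BartTokenizerFast.from_pretrained("facebook/bart-base")
    encoding = tokenizer("Hello world", return_tensors="np")
    # build_inputs_with_special_tokens wraps the sequence as <s> ... </s>, so
    # the ids start with bos_token_id 0 and end with eos_token_id 2.
    print(encoding["input_ids"])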
| 72 | 1 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
logger = logging.get_logger(__name__)
if is_vision_available():
import PIL
class CLIPImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: int = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
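# A minimal usage sketch (added for illustration): with the defaults above, a
# single HxWxC uint8 array is resized so its shortest edge is 224, center
# cropped to 224x224, rescaled to [0, 1] and normalized with the CLIP stats.
if __name__ == "__main__":
    image_processor = CLIPImageProcessor()
    dummy_image = np.random.randint(0, 256, size=(480, 640, 3), dtype=np.uint8)
    batch = image_processor(images=dummy_image, return_tensors="np")
    print(batch["pixel_values"].shape)  # (1, 3, 224, 224)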
| 29 |
def cocktail_shaker_sort(unsorted: list) -> list:
    """
    Sort a list in place by shaking it in both directions.

    >>> cocktail_shaker_sort([4, 5, 2, 1, 2])
    [1, 2, 2, 4, 5]
    >>> cocktail_shaker_sort([-4, 5, 0, 1, 2, 11])
    [-4, 0, 1, 2, 5, 11]
    """
    for i in range(len(unsorted) - 1, 0, -1):
        swapped = False

        for j in range(i, 0, -1):
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j], unsorted[j - 1] = unsorted[j - 1], unsorted[j]
                swapped = True

        for j in range(i):
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j], unsorted[j + 1] = unsorted[j + 1], unsorted[j]
                swapped = True

        if not swapped:
            break
    return unsorted
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
print(f"""{cocktail_shaker_sort(unsorted) = }""")
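# Complexity note: like bubble sort, the worst case is O(n^2) comparisons; the
# `swapped` flag gives an O(n) best case on already-sorted input.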
| 29 | 1 |
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
logger = logging.get_logger(__name__)


@dataclass
class TensorFlowBenchmarkArguments(BenchmarkArguments):
    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]

    def __init__(self, **kwargs):
        """
        This __init__ is there for legacy code. When removing deprecated args completely, the class can simply be
        deleted
        """
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                kwargs[positive_arg] = not kwargs.pop(deprecated_arg)
                logger.warning(
                    f"{deprecated_arg} is depreciated. Please use --no-{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}"
                )

        self.tpu_name = kwargs.pop("tpu_name", self.tpu_name)
        self.device_idx = kwargs.pop("device_idx", self.device_idx)
        self.eager_mode = kwargs.pop("eager_mode", self.eager_mode)
        self.use_xla = kwargs.pop("use_xla", self.use_xla)
        super().__init__(**kwargs)

    tpu_name: str = field(
        default=None,
        metadata={"help": "Name of TPU"},
    )
    device_idx: int = field(
        default=0,
        metadata={"help": "CPU / GPU device index. Defaults to 0."},
    )
    eager_mode: bool = field(default=False, metadata={"help": "Benchmark models in eager model."})
    use_xla: bool = field(
        default=False,
        metadata={
            "help": "Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`."
        },
    )

    @cached_property
    def _setup_tpu(self):
        requires_backends(self, ["tf"])
        tpu = None
        if self.tpu:
            try:
                if self.tpu_name:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name)
                else:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
            except ValueError:
                tpu = None
        return tpu

    @cached_property
    def _setup_strategy(self):
        requires_backends(self, ["tf"])
        if self.is_tpu:
            tf.config.experimental_connect_to_cluster(self._setup_tpu)
            tf.tpu.experimental.initialize_tpu_system(self._setup_tpu)

            strategy = tf.distribute.TPUStrategy(self._setup_tpu)
        else:
            # currently no multi gpu is allowed
            if self.is_gpu:
                # TODO: Currently only single GPU is supported
                tf.config.set_visible_devices(self.gpu_list[self.device_idx], "GPU")
                strategy = tf.distribute.OneDeviceStrategy(device=f"/gpu:{self.device_idx}")
            else:
                tf.config.set_visible_devices([], "GPU")  # disable GPU
                strategy = tf.distribute.OneDeviceStrategy(device=f"/cpu:{self.device_idx}")
        return strategy

    @property
    def is_tpu(self) -> bool:
        requires_backends(self, ["tf"])
        return self._setup_tpu is not None

    @property
    def strategy(self):
        requires_backends(self, ["tf"])
        return self._setup_strategy

    @property
    def gpu_list(self):
        requires_backends(self, ["tf"])
        return tf.config.list_physical_devices("GPU")

    @property
    def n_gpu(self) -> int:
        requires_backends(self, ["tf"])
        if self.cuda:
            return len(self.gpu_list)
        return 0

    @property
    def is_gpu(self) -> bool:
        return self.n_gpu > 0
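# A minimal usage sketch (added for illustration; assumes TensorFlow and the
# parent `BenchmarkArguments` fields are available):
if __name__ == "__main__":
    args = TensorFlowBenchmarkArguments(models=["bert-base-cased"], no_inference=True)
    # The deprecated negative flag is rewritten by __init__ above:
    print(args.inference)  # False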
| 287 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class TFCamembertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFCamembertModel.from_pretrained("jplu/tf-camembert-base")

        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]],
            dtype=tf.int32,
        )  # J'aime le camembert !"

        output = model(input_ids)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 10, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]],
            dtype=tf.float32,
        )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()

        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 287 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/biogpt": "https://huggingface.co/microsoft/biogpt/resolve/main/config.json",
    # See all BioGPT models at https://huggingface.co/models?filter=biogpt
}


class BioGptConfig(PretrainedConfig):
    model_type = "biogpt"

    def __init__(
        self,
        vocab_size=42384,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        scale_embedding=True,
        use_cache=True,
        layerdrop=0.0,
        activation_dropout=0.0,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.scale_embedding = scale_embedding
        self.use_cache = use_cache
        self.layerdrop = layerdrop
        self.activation_dropout = activation_dropout
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
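# A minimal usage sketch (added for illustration; needs `transformers`
# importable): the defaults mirror the `microsoft/biogpt` checkpoint.
if __name__ == "__main__":
    config = BioGptConfig()
    print(config.vocab_size, config.hidden_size, config.num_hidden_layers)  # 42384 1024 24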
| 41 |
from __future__ import annotations
graph = {
"A": ["B", "C", "E"],
"B": ["A", "D", "E"],
"C": ["A", "F", "G"],
"D": ["B"],
"E": ["A", "B", "D"],
"F": ["C"],
"G": ["C"],
}
class Graph:
    def __init__(self, graph: dict[str, list[str]], source_vertex: str) -> None:
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent: dict[str, str | None] = {}
        self.source_vertex = source_vertex

    def breath_first_search(self) -> None:
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue

        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)

    def shortest_path(self, target_vertex: str) -> str:
        if target_vertex == self.source_vertex:
            return self.source_vertex

        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            msg = (
                f"No path from vertex: {self.source_vertex} to vertex: {target_vertex}"
            )
            raise ValueError(msg)

        return self.shortest_path(target_vertex_parent) + f"->{target_vertex}"
if __name__ == "__main__":
    g = Graph(graph, "G")
g.breath_first_search()
print(g.shortest_path("D"))
print(g.shortest_path("G"))
print(g.shortest_path("Foo"))
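    # Expected output of the demo above: "G->C->A->B->D", then "G", and finally
    # a ValueError ("No path from vertex: G to vertex: Foo") for the unknown
    # vertex.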
| 41 | 1 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"""configuration_vivit""": ["""VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """VivitConfig"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_vivit"] = ["VivitImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vivit"] = [
"""VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""VivitModel""",
"""VivitPreTrainedModel""",
"""VivitForVideoClassification""",
]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
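    # Note on the pattern above: `_LazyModule` replaces this module in
    # `sys.modules` with a proxy that resolves the names in `_import_structure`
    # on first attribute access, so importing the package stays cheap even when
    # torch/vision are absent.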
| 195 |
'''simple docstring'''
import bz2
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
logger = get_logger(__name__)


class ExtractManager:
    def __init__(self, cache_dir: Optional[str] = None):
        self.extract_dir = (
            os.path.join(cache_dir, config.EXTRACTED_DATASETS_DIR) if cache_dir else config.EXTRACTED_DATASETS_PATH
        )
        self.extractor = Extractor

    def _get_output_path(self, path: str) -> str:
        from .file_utils import hash_url_to_filename

        # Path where we extract compressed archives
        # We extract in the cache dir, and get the extracted path name by hashing the original path"
        abs_path = os.path.abspath(path)
        return os.path.join(self.extract_dir, hash_url_to_filename(abs_path))

    def _do_extract(self, output_path: str, force_extract: bool) -> bool:
        return force_extract or (
            not os.path.isfile(output_path) and not (os.path.isdir(output_path) and os.listdir(output_path))
        )

    def extract(self, input_path: str, force_extract: bool = False) -> str:
        extractor_format = self.extractor.infer_extractor_format(input_path)
        if not extractor_format:
            return input_path
        output_path = self._get_output_path(input_path)
        if self._do_extract(output_path, force_extract):
            self.extractor.extract(input_path, output_path, extractor_format)
        return output_path
class BaseExtractor(ABC):
    @classmethod
    @abstractmethod
    def is_extractable(cls, path: Union[Path, str], **kwargs) -> bool:
        ...

    @staticmethod
    @abstractmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        ...


class MagicNumberBaseExtractor(BaseExtractor, ABC):
    magic_numbers: List[bytes] = []

    @staticmethod
    def read_magic_number(path: Union[Path, str], magic_number_length: int):
        with open(path, "rb") as f:
            return f.read(magic_number_length)

    @classmethod
    def is_extractable(cls, path: Union[Path, str], magic_number: bytes = b"") -> bool:
        if not magic_number:
            magic_number_length = max(len(cls_magic_number) for cls_magic_number in cls.magic_numbers)
            try:
                magic_number = cls.read_magic_number(path, magic_number_length)
            except OSError:
                return False
        return any(magic_number.startswith(cls_magic_number) for cls_magic_number in cls.magic_numbers)
class TarExtractor(BaseExtractor):
    @classmethod
    def is_extractable(cls, path: Union[Path, str], **kwargs) -> bool:
        return tarfile.is_tarfile(path)

    @staticmethod
    def safemembers(members, output_path):
        def resolved(path: str) -> str:
            return os.path.realpath(os.path.abspath(path))

        def badpath(path: str, base: str) -> bool:
            # joinpath will ignore base if path is absolute
            return not resolved(os.path.join(base, path)).startswith(base)

        def badlink(info, base: str) -> bool:
            # Links are interpreted relative to the directory containing the link
            tip = resolved(os.path.join(base, os.path.dirname(info.name)))
            return badpath(info.linkname, base=tip)

        base = resolved(output_path)

        for finfo in members:
            if badpath(finfo.name, base):
                logger.error(f"Extraction of {finfo.name} is blocked (illegal path)")
            elif finfo.issym() and badlink(finfo, base):
                logger.error(f"Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}")
            elif finfo.islnk() and badlink(finfo, base):
                logger.error(f"Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}")
            else:
                yield finfo

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        os.makedirs(output_path, exist_ok=True)
        tar_file = tarfile.open(input_path)
        tar_file.extractall(output_path, members=TarExtractor.safemembers(tar_file, output_path))
        tar_file.close()
class GzipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x1F\x8B"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        with gzip.open(input_path, "rb") as gzip_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(gzip_file, extracted_file)
class ZipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [
        b"PK\x03\x04",
        b"PK\x05\x06",  # empty archive
        b"PK\x07\x08",  # spanned archive
    ]

    @classmethod
    def is_extractable(cls, path: Union[Path, str], magic_number: bytes = b"") -> bool:
        if super().is_extractable(path, magic_number=magic_number):
            return True
        try:
            # Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
            # From: https://github.com/python/cpython/pull/5053
            from zipfile import (
                _CD_SIGNATURE,
                _ECD_DISK_NUMBER,
                _ECD_DISK_START,
                _ECD_ENTRIES_TOTAL,
                _ECD_OFFSET,
                _ECD_SIZE,
                _EndRecData,
                sizeCentralDir,
                stringCentralDir,
                structCentralDir,
            )

            with open(path, "rb") as fp:
                endrec = _EndRecData(fp)
                if endrec:
                    if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
                        return True  # Empty zipfiles are still zipfiles
                    elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
                        fp.seek(endrec[_ECD_OFFSET])  # Central directory is on the same disk
                        if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
                            data = fp.read(sizeCentralDir)  # CD is where we expect it to be
                            if len(data) == sizeCentralDir:
                                centdir = struct.unpack(structCentralDir, data)  # CD is the right size
                                if centdir[_CD_SIGNATURE] == stringCentralDir:
                                    return True  # First central directory entry has correct magic number
            return False
        except Exception:  # catch all errors in case future python versions change the zipfile internals
            return False

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        os.makedirs(output_path, exist_ok=True)
        with zipfile.ZipFile(input_path, "r") as zip_file:
            zip_file.extractall(output_path)
            zip_file.close()
class XzExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\xFD\x37\x7A\x58\x5A\x00"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        with lzma.open(input_path) as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)
class RarExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"Rar!\x1a\x07\x00", b"Rar!\x1a\x07\x01\x00"]  # RAR_ID # RAR5_ID

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.RARFILE_AVAILABLE:
            raise ImportError("Please pip install rarfile")
        import rarfile

        os.makedirs(output_path, exist_ok=True)
        rf = rarfile.RarFile(input_path)
        rf.extractall(output_path)
        rf.close()
class ZstdExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x28\xb5\x2F\xFD"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.ZSTANDARD_AVAILABLE:
            raise ImportError("Please pip install zstandard")
        import zstandard as zstd

        dctx = zstd.ZstdDecompressor()
        with open(input_path, "rb") as ifh, open(output_path, "wb") as ofh:
            dctx.copy_stream(ifh, ofh)
class Bzip2Extractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x42\x5A\x68"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        with bz2.open(input_path, "rb") as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)
class SevenZipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x37\x7A\xBC\xAF\x27\x1C"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.PY7ZR_AVAILABLE:
            raise ImportError("Please pip install py7zr")
        import py7zr

        os.makedirs(output_path, exist_ok=True)
        with py7zr.SevenZipFile(input_path, "r") as archive:
            archive.extractall(output_path)
class Lz4Extractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x04\x22\x4D\x18"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.LZ4_AVAILABLE:
            raise ImportError("Please pip install lz4")
        import lz4.frame

        with lz4.frame.open(input_path, "rb") as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)
class Extractor:
    extractors: Dict[str, Type[BaseExtractor]] = {
        "tar": TarExtractor,
        "gzip": GzipExtractor,
        "zip": ZipExtractor,
        "xz": XzExtractor,
        "rar": RarExtractor,
        "zstd": ZstdExtractor,
        "bz2": Bzip2Extractor,
        "7z": SevenZipExtractor,  # <Added version="2.4.0"/>
        "lz4": Lz4Extractor,  # <Added version="2.4.0"/>
    }

    @classmethod
    def _get_magic_number_max_length(cls):
        return max(
            len(extractor_magic_number)
            for extractor in cls.extractors.values()
            if issubclass(extractor, MagicNumberBaseExtractor)
            for extractor_magic_number in extractor.magic_numbers
        )

    @staticmethod
    def _read_magic_number(path: Union[Path, str], magic_number_length: int):
        try:
            return MagicNumberBaseExtractor.read_magic_number(path, magic_number_length=magic_number_length)
        except OSError:
            return b""

    @classmethod
    def is_extractable(cls, path: Union[Path, str], return_extractor: bool = False) -> bool:
        warnings.warn(
            "Method 'is_extractable' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
            "Use 'infer_extractor_format' instead.",
            category=FutureWarning,
        )
        extractor_format = cls.infer_extractor_format(path)
        if extractor_format:
            return True if not return_extractor else (True, cls.extractors[extractor_format])
        return False if not return_extractor else (False, None)

    @classmethod
    def infer_extractor_format(cls, path: Union[Path, str]) -> Optional[str]:  # <Added version="2.4.0"/>
        magic_number_max_length = cls._get_magic_number_max_length()
        magic_number = cls._read_magic_number(path, magic_number_max_length)
        for extractor_format, extractor in cls.extractors.items():
            if extractor.is_extractable(path, magic_number=magic_number):
                return extractor_format
        return None

    @classmethod
    def extract(
        cls,
        input_path: Union[Path, str],
        output_path: Union[Path, str],
        extractor_format: Optional[str] = None,  # <Added version="2.4.0"/>
        extractor: Optional[BaseExtractor] = "deprecated",
    ) -> None:
        os.makedirs(os.path.dirname(output_path), exist_ok=True)
        # Prevent parallel extractions
        lock_path = str(Path(output_path).with_suffix(".lock"))
        with FileLock(lock_path):
            shutil.rmtree(output_path, ignore_errors=True)
            if extractor_format or extractor != "deprecated":
                if extractor != "deprecated" or not isinstance(extractor_format, str):  # passed as positional arg
                    warnings.warn(
                        "Parameter 'extractor' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
                        "Use 'extractor_format' instead.",
                        category=FutureWarning,
                    )
                    extractor = extractor if extractor != "deprecated" else extractor_format
                else:
                    extractor = cls.extractors[extractor_format]
                return extractor.extract(input_path, output_path)
            else:
                warnings.warn(
                    "Parameter 'extractor_format' was made required in version 2.4.0 and not passing it will raise an "
                    "exception in 3.0.0.",
                    category=FutureWarning,
                )
                for extractor in cls.extractors.values():
                    if extractor.is_extractable(input_path):
                        return extractor.extract(input_path, output_path)
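# A minimal usage sketch (added for illustration; the archive path is
# hypothetical): infer the format from the file's magic number, then extract.
if __name__ == "__main__":
    archive_path = "/tmp/example.tar.gz"
    extractor_format = Extractor.infer_extractor_format(archive_path)
    if extractor_format:
        Extractor.extract(archive_path, "/tmp/example_extracted", extractor_format=extractor_format)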
| 31 | 0 |
import argparse
import os.path as osp
import re
import torch
from safetensors.torch import load_file, save_file
# =================#
# UNet Conversion #
# =================#
unet_conversion_map = [
# (stable-diffusion, HF Diffusers)
("""time_embed.0.weight""", """time_embedding.linear_1.weight"""),
("""time_embed.0.bias""", """time_embedding.linear_1.bias"""),
("""time_embed.2.weight""", """time_embedding.linear_2.weight"""),
("""time_embed.2.bias""", """time_embedding.linear_2.bias"""),
("""input_blocks.0.0.weight""", """conv_in.weight"""),
("""input_blocks.0.0.bias""", """conv_in.bias"""),
("""out.0.weight""", """conv_norm_out.weight"""),
("""out.0.bias""", """conv_norm_out.bias"""),
("""out.2.weight""", """conv_out.weight"""),
("""out.2.bias""", """conv_out.bias"""),
]
unet_conversion_map_resnet = [
# (stable-diffusion, HF Diffusers)
("""in_layers.0""", """norm1"""),
("""in_layers.2""", """conv1"""),
("""out_layers.0""", """norm2"""),
("""out_layers.3""", """conv2"""),
("""emb_layers.1""", """time_emb_proj"""),
("""skip_connection""", """conv_shortcut"""),
]
unet_conversion_map_layer = []
# hardcoded number of downblocks and resnets/attentions...
# would need smarter logic for other networks.
for i in range(4):
    # loop over downblocks/upblocks

    for j in range(2):
        # loop over resnets/attentions for downblocks
        hf_down_res_prefix = f"down_blocks.{i}.resnets.{j}."
        sd_down_res_prefix = f"input_blocks.{3*i + j + 1}.0."
        unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))

        if i < 3:
            # no attention layers in down_blocks.3
            hf_down_atn_prefix = f"down_blocks.{i}.attentions.{j}."
            sd_down_atn_prefix = f"input_blocks.{3*i + j + 1}.1."
            unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))

    for j in range(3):
        # loop over resnets/attentions for upblocks
        hf_up_res_prefix = f"up_blocks.{i}.resnets.{j}."
        sd_up_res_prefix = f"output_blocks.{3*i + j}.0."
        unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))

        if i > 0:
            # no attention layers in up_blocks.0
            hf_up_atn_prefix = f"up_blocks.{i}.attentions.{j}."
            sd_up_atn_prefix = f"output_blocks.{3*i + j}.1."
            unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))

    if i < 3:
        # no downsample in down_blocks.3
        hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0.conv."
        sd_downsample_prefix = f"input_blocks.{3*(i+1)}.0.op."
        unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))

        # no upsample in up_blocks.3
        hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
        sd_upsample_prefix = f"output_blocks.{3*i + 2}.{1 if i == 0 else 2}."
        unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))

hf_mid_atn_prefix = "mid_block.attentions.0."
sd_mid_atn_prefix = "middle_block.1."
unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))

for j in range(2):
    hf_mid_res_prefix = f"mid_block.resnets.{j}."
    sd_mid_res_prefix = f"middle_block.{2*j}."
    unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))
def convert_unet_state_dict(unet_state_dict):
    mapping = {k: k for k in unet_state_dict.keys()}
    for sd_name, hf_name in unet_conversion_map:
        mapping[hf_name] = sd_name
    for k, v in mapping.items():
        if "resnets" in k:
            for sd_part, hf_part in unet_conversion_map_resnet:
                v = v.replace(hf_part, sd_part)
            mapping[k] = v
    for k, v in mapping.items():
        for sd_part, hf_part in unet_conversion_map_layer:
            v = v.replace(hf_part, sd_part)
        mapping[k] = v
    new_state_dict = {v: unet_state_dict[k] for k, v in mapping.items()}
    return new_state_dict
# ================#
# VAE Conversion #
# ================#
vae_conversion_map = [
# (stable-diffusion, HF Diffusers)
("""nin_shortcut""", """conv_shortcut"""),
("""norm_out""", """conv_norm_out"""),
("""mid.attn_1.""", """mid_block.attentions.0."""),
]
for i in range(4):
    # down_blocks have two resnets
    for j in range(2):
        hf_down_prefix = f"encoder.down_blocks.{i}.resnets.{j}."
        sd_down_prefix = f"encoder.down.{i}.block.{j}."
        vae_conversion_map.append((sd_down_prefix, hf_down_prefix))

    if i < 3:
        hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0."
        sd_downsample_prefix = f"down.{i}.downsample."
        vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix))

        hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
        sd_upsample_prefix = f"up.{3-i}.upsample."
        vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix))

    # up_blocks have three resnets
    # also, up blocks in hf are numbered in reverse from sd
    for j in range(3):
        hf_up_prefix = f"decoder.up_blocks.{i}.resnets.{j}."
        sd_up_prefix = f"decoder.up.{3-i}.block.{j}."
        vae_conversion_map.append((sd_up_prefix, hf_up_prefix))

# this part accounts for mid blocks in both the encoder and the decoder
for i in range(2):
    hf_mid_res_prefix = f"mid_block.resnets.{i}."
    sd_mid_res_prefix = f"mid.block_{i+1}."
    vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix))
vae_conversion_map_attn = [
# (stable-diffusion, HF Diffusers)
("""norm.""", """group_norm."""),
("""q.""", """query."""),
("""k.""", """key."""),
("""v.""", """value."""),
("""proj_out.""", """proj_attn."""),
]
def reshape_weight_for_sd(w):
    # convert HF linear weights to SD conv2d weights
    return w.reshape(*w.shape, 1, 1)


def convert_vae_state_dict(vae_state_dict):
    mapping = {k: k for k in vae_state_dict.keys()}
    for k, v in mapping.items():
        for sd_part, hf_part in vae_conversion_map:
            v = v.replace(hf_part, sd_part)
        mapping[k] = v
    for k, v in mapping.items():
        if "attentions" in k:
            for sd_part, hf_part in vae_conversion_map_attn:
                v = v.replace(hf_part, sd_part)
            mapping[k] = v
    new_state_dict = {v: vae_state_dict[k] for k, v in mapping.items()}
    weights_to_convert = ["q", "k", "v", "proj_out"]
    for k, v in new_state_dict.items():
        for weight_name in weights_to_convert:
            if f"mid.attn_1.{weight_name}.weight" in k:
                print(f"Reshaping {k} for SD format")
                new_state_dict[k] = reshape_weight_for_sd(v)
    return new_state_dict
# =========================#
# Text Encoder Conversion #
# =========================#
textenc_conversion_lst = [
# (stable-diffusion, HF Diffusers)
("""resblocks.""", """text_model.encoder.layers."""),
("""ln_1""", """layer_norm1"""),
("""ln_2""", """layer_norm2"""),
(""".c_fc.""", """.fc1."""),
(""".c_proj.""", """.fc2."""),
(""".attn""", """.self_attn"""),
("""ln_final.""", """transformer.text_model.final_layer_norm."""),
("""token_embedding.weight""", """transformer.text_model.embeddings.token_embedding.weight"""),
("""positional_embedding""", """transformer.text_model.embeddings.position_embedding.weight"""),
]
protected = {re.escape(x[1]): x[0] for x in textenc_conversion_lst}
textenc_pattern = re.compile("|".join(protected.keys()))

# Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp
code2idx = {"q": 0, "k": 1, "v": 2}
def convert_text_enc_state_dict_v20(text_enc_dict):
    new_state_dict = {}
    capture_qkv_weight = {}
    capture_qkv_bias = {}
    for k, v in text_enc_dict.items():
        if (
            k.endswith(".self_attn.q_proj.weight")
            or k.endswith(".self_attn.k_proj.weight")
            or k.endswith(".self_attn.v_proj.weight")
        ):
            k_pre = k[: -len(".q_proj.weight")]
            k_code = k[-len("q_proj.weight")]
            if k_pre not in capture_qkv_weight:
                capture_qkv_weight[k_pre] = [None, None, None]
            capture_qkv_weight[k_pre][code2idx[k_code]] = v
            continue

        if (
            k.endswith(".self_attn.q_proj.bias")
            or k.endswith(".self_attn.k_proj.bias")
            or k.endswith(".self_attn.v_proj.bias")
        ):
            k_pre = k[: -len(".q_proj.bias")]
            k_code = k[-len("q_proj.bias")]
            if k_pre not in capture_qkv_bias:
                capture_qkv_bias[k_pre] = [None, None, None]
            capture_qkv_bias[k_pre][code2idx[k_code]] = v
            continue

        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k)
        new_state_dict[relabelled_key] = v

    for k_pre, tensors in capture_qkv_weight.items():
        if None in tensors:
            raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing")
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + ".in_proj_weight"] = torch.cat(tensors)

    for k_pre, tensors in capture_qkv_bias.items():
        if None in tensors:
            raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing")
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + ".in_proj_bias"] = torch.cat(tensors)

    return new_state_dict


def convert_text_enc_state_dict(text_enc_dict):
    return text_enc_dict
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument("--model_path", default=None, type=str, required=True, help="Path to the model to convert.")
    parser.add_argument("--checkpoint_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument("--half", action="store_true", help="Save weights in half precision.")
    parser.add_argument(
        "--use_safetensors", action="store_true", help="Save weights use safetensors, default is ckpt."
    )

    args = parser.parse_args()

    assert args.model_path is not None, "Must provide a model path!"
    assert args.checkpoint_path is not None, "Must provide a checkpoint path!"

    # Path for safetensors
    unet_path = osp.join(args.model_path, "unet", "diffusion_pytorch_model.safetensors")
    vae_path = osp.join(args.model_path, "vae", "diffusion_pytorch_model.safetensors")
    text_enc_path = osp.join(args.model_path, "text_encoder", "model.safetensors")

    # Load models from safetensors if it exists, if it doesn't pytorch
    if osp.exists(unet_path):
        unet_state_dict = load_file(unet_path, device="cpu")
    else:
        unet_path = osp.join(args.model_path, "unet", "diffusion_pytorch_model.bin")
        unet_state_dict = torch.load(unet_path, map_location="cpu")

    if osp.exists(vae_path):
        vae_state_dict = load_file(vae_path, device="cpu")
    else:
        vae_path = osp.join(args.model_path, "vae", "diffusion_pytorch_model.bin")
        vae_state_dict = torch.load(vae_path, map_location="cpu")

    if osp.exists(text_enc_path):
        text_enc_dict = load_file(text_enc_path, device="cpu")
    else:
        text_enc_path = osp.join(args.model_path, "text_encoder", "pytorch_model.bin")
        text_enc_dict = torch.load(text_enc_path, map_location="cpu")

    # Convert the UNet model
    unet_state_dict = convert_unet_state_dict(unet_state_dict)
    unet_state_dict = {"model.diffusion_model." + k: v for k, v in unet_state_dict.items()}

    # Convert the VAE model
    vae_state_dict = convert_vae_state_dict(vae_state_dict)
    vae_state_dict = {"first_stage_model." + k: v for k, v in vae_state_dict.items()}

    # Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper
    is_v20_model = "text_model.encoder.layers.22.layer_norm2.bias" in text_enc_dict

    if is_v20_model:
        # Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm
        text_enc_dict = {"transformer." + k: v for k, v in text_enc_dict.items()}
        text_enc_dict = convert_text_enc_state_dict_v20(text_enc_dict)
        text_enc_dict = {"cond_stage_model.model." + k: v for k, v in text_enc_dict.items()}
    else:
        text_enc_dict = convert_text_enc_state_dict(text_enc_dict)
        text_enc_dict = {"cond_stage_model.transformer." + k: v for k, v in text_enc_dict.items()}

    # Put together new checkpoint
    state_dict = {**unet_state_dict, **vae_state_dict, **text_enc_dict}
    if args.half:
        state_dict = {k: v.half() for k, v in state_dict.items()}

    if args.use_safetensors:
        save_file(state_dict, args.checkpoint_path)
    else:
        state_dict = {"state_dict": state_dict}
        torch.save(state_dict, args.checkpoint_path)
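# Example invocation (illustrative paths; the script filename is assumed):
#   python convert_diffusers_to_original_stable_diffusion.py \
#       --model_path ./my-diffusers-model --checkpoint_path ./model.safetensors --use_safetensors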
| 361 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.speecht5 import SpeechT5ForTextToSpeech, SpeechT5HifiGan, SpeechT5Processor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class TextToSpeechTool(PipelineTool):
    default_checkpoint = "microsoft/speecht5_tts"
    description = (
        "This is a tool that reads an English text out loud. It takes an input named `text` which should contain the "
        "text to read (in English) and returns a waveform object containing the sound."
    )
    name = "text_reader"
    pre_processor_class = SpeechT5Processor
    model_class = SpeechT5ForTextToSpeech
    post_processor_class = SpeechT5HifiGan

    inputs = ["text"]
    outputs = ["audio"]

    def setup(self):
        if self.post_processor is None:
            self.post_processor = "microsoft/speecht5_hifigan"
        super().setup()

    def encode(self, text, speaker_embeddings=None):
        inputs = self.pre_processor(text=text, return_tensors="pt", truncation=True)

        if speaker_embeddings is None:
            if not is_datasets_available():
                raise ImportError("Datasets needs to be installed if not passing speaker embeddings.")

            embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
            speaker_embeddings = torch.tensor(embeddings_dataset[7305]["xvector"]).unsqueeze(0)

        return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}

    def forward(self, inputs):
        with torch.no_grad():
            return self.model.generate_speech(**inputs)

    def decode(self, outputs):
        with torch.no_grad():
            return self.post_processor(outputs).cpu().detach()
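# A minimal usage sketch (added for illustration; downloads several checkpoints
# from the Hub on first use, plus the speaker-embedding dataset):
if __name__ == "__main__":
    tool = TextToSpeechTool()
    audio = tool("Hello, this is a test.")  # 1-D waveform tensor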
| 122 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class PoolFormerImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize_and_center_crop=True,
        size=None,
        crop_pct=0.9,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"shortest_edge": 30}
        crop_size = crop_size if crop_size is not None else {"height": 30, "width": 30}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize_and_center_crop = do_resize_and_center_crop
        self.size = size
        self.crop_pct = crop_pct
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "size": self.size,
            "do_resize_and_center_crop": self.do_resize_and_center_crop,
            "crop_pct": self.crop_pct,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }
@require_torch
@require_vision
class A_ ( A__ , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = PoolFormerImageProcessor if is_vision_available() else None
def UpperCAmelCase__ ( self :Any ):
"""simple docstring"""
lowerCamelCase__ : Optional[int] =PoolFormerImageProcessingTester(self )
@property
def UpperCAmelCase__ ( self :List[str] ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase__ ( self :Any ):
"""simple docstring"""
lowerCamelCase__ : Tuple =self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCamelCase_ , 'do_resize_and_center_crop' ) )
self.assertTrue(hasattr(lowerCamelCase_ , 'size' ) )
self.assertTrue(hasattr(lowerCamelCase_ , 'crop_pct' ) )
self.assertTrue(hasattr(lowerCamelCase_ , 'do_normalize' ) )
self.assertTrue(hasattr(lowerCamelCase_ , 'image_mean' ) )
self.assertTrue(hasattr(lowerCamelCase_ , 'image_std' ) )
    def test_image_processor_from_dict_with_kwargs( self ):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {'shortest_edge': 30} )
        self.assertEqual(image_processor.crop_size , {'height': 30, 'width': 30} )
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
        self.assertEqual(image_processor.size , {'shortest_edge': 42} )
        self.assertEqual(image_processor.crop_size , {'height': 84, 'width': 84} )
    def test_batch_feature( self ):
"""simple docstring"""
pass
    def test_call_pil( self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape ,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape ,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )
    def test_call_numpy( self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape ,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape ,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )
    def test_call_pytorch( self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape ,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape ,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )
"""simple docstring"""
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTImageProcessor,
MobileViTVaConfig,
MobileViTVaForImageClassification,
MobileViTVaForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def load_orig_config_file(orig_cfg_file):
    print('Loading config file...' )
    def flatten_yaml_as_dict(d, parent_key='', sep='.'):
        items = []
        for k, v in d.items():
            new_key = parent_key + sep + k if parent_key else k
            if isinstance(v , collections.abc.MutableMapping ):
                items.extend(flatten_yaml_as_dict(v , new_key , sep=sep ).items() )
            else:
                items.append((new_key, v) )
        return dict(items )
    config = argparse.Namespace()
    with open(orig_cfg_file , 'r' ) as yaml_file:
        try:
            cfg = yaml.load(yaml_file , Loader=yaml.FullLoader )
            flat_cfg = flatten_yaml_as_dict(cfg )
            for k, v in flat_cfg.items():
                setattr(config , k , v )
        except yaml.YAMLError as exc:
            logger.error('Error while loading config file: {}. Error message: {}'.format(orig_cfg_file , str(exc ) ) )
    return config
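# Illustrative sketch (input invented for the example): flatten_yaml_as_dict turns
# nested mappings into dotted keys, e.g.
#     {'model': {'classification': {'name': 'mobilevit_v2'}}}
# becomes
#     {'model.classification.name': 'mobilevit_v2'}
# which is why the dotted getattr() lookups on the Namespace below work.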
def get_mobilevitva_config(task_name, orig_cfg_file):
    config = MobileViTVaConfig()
    is_segmentation_model = False
    # dataset
    if task_name.startswith('imagenet1k_' ):
        config.num_labels = 1000
        if int(task_name.strip().split('_' )[-1] ) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = 'imagenet-1k-id2label.json'
    elif task_name.startswith('imagenet21k_to_1k_' ):
        config.num_labels = 21000
        if int(task_name.strip().split('_' )[-1] ) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = 'imagenet-22k-id2label.json'
    elif task_name.startswith('ade20k_' ):
        config.num_labels = 151
        config.image_size = 512
        filename = 'ade20k-id2label.json'
        is_segmentation_model = True
    elif task_name.startswith('voc_' ):
        config.num_labels = 21
        config.image_size = 512
        filename = 'pascal-voc-id2label.json'
        is_segmentation_model = True
    # orig_config
    orig_config = load_orig_config_file(orig_cfg_file )
    assert getattr(orig_config , 'model.classification.name' , -1 ) == "mobilevit_v2", "Invalid model"
    config.width_multiplier = getattr(orig_config , 'model.classification.mitv2.width_multiplier' , 1.0 )
    assert (
        getattr(orig_config , 'model.classification.mitv2.attn_norm_layer' , -1 ) == "layer_norm_2d"
    ), "Norm layers other than layer_norm_2d is not supported"
    config.hidden_act = getattr(orig_config , 'model.classification.activation.name' , 'swish' )
    # config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)
    if is_segmentation_model:
        config.output_stride = getattr(orig_config , 'model.segmentation.output_stride' , 16 )
        if "_deeplabv3" in task_name:
            config.atrous_rates = getattr(orig_config , 'model.segmentation.deeplabv3.aspp_rates' , [12, 24, 36] )
            config.aspp_out_channels = getattr(orig_config , 'model.segmentation.deeplabv3.aspp_out_channels' , 512 )
            config.aspp_dropout_prob = getattr(orig_config , 'model.segmentation.deeplabv3.aspp_dropout' , 0.1 )
    # id2label
    repo_id = 'huggingface/label-files'
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='dataset' ) , 'r' ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def rename_key(dct, old, new):
    val = dct.pop(old )
    dct[new] = val
def create_rename_keys(state_dict, base_model=False):
    if base_model:
        model_prefix = ''
    else:
        model_prefix = 'mobilevitv2.'
    rename_keys = []
    for k in state_dict.keys():
        if k[:8] == "encoder.":
            k_new = k[8:]
        else:
            k_new = k
        if ".block." in k:
            k_new = k_new.replace('.block.' , '.' )
        if ".conv." in k:
            k_new = k_new.replace('.conv.' , '.convolution.' )
        if ".norm." in k:
            k_new = k_new.replace('.norm.' , '.normalization.' )
        if "conv_1." in k:
            k_new = k_new.replace('conv_1.' , f"{model_prefix}conv_stem." )
        for i in [1, 2]:
            if f"layer_{i}." in k:
                k_new = k_new.replace(f"layer_{i}." , f"{model_prefix}encoder.layer.{i-1}.layer." )
        if ".exp_1x1." in k:
            k_new = k_new.replace('.exp_1x1.' , '.expand_1x1.' )
        if ".red_1x1." in k:
            k_new = k_new.replace('.red_1x1.' , '.reduce_1x1.' )
        for i in [3, 4, 5]:
            if f"layer_{i}.0." in k:
                k_new = k_new.replace(f"layer_{i}.0." , f"{model_prefix}encoder.layer.{i-1}.downsampling_layer." )
            if f"layer_{i}.1.local_rep.0." in k:
                k_new = k_new.replace(f"layer_{i}.1.local_rep.0." , f"{model_prefix}encoder.layer.{i-1}.conv_kxk." )
            if f"layer_{i}.1.local_rep.1." in k:
                k_new = k_new.replace(f"layer_{i}.1.local_rep.1." , f"{model_prefix}encoder.layer.{i-1}.conv_1x1." )
        for i in [3, 4, 5]:
            if i == 3:
                j_in = [0, 1]
            elif i == 4:
                j_in = [0, 1, 2, 3]
            elif i == 5:
                j_in = [0, 1, 2]
            for j in j_in:
                if f"layer_{i}.1.global_rep.{j}." in k:
                    k_new = k_new.replace(
                        f"layer_{i}.1.global_rep.{j}." , f"{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}." )
            if f"layer_{i}.1.global_rep.{j+1}." in k:
                k_new = k_new.replace(
                    f"layer_{i}.1.global_rep.{j+1}." , f"{model_prefix}encoder.layer.{i-1}.layernorm." )
            if f"layer_{i}.1.conv_proj." in k:
                k_new = k_new.replace(f"layer_{i}.1.conv_proj." , f"{model_prefix}encoder.layer.{i-1}.conv_projection." )
        if "pre_norm_attn.0." in k:
            k_new = k_new.replace('pre_norm_attn.0.' , 'layernorm_before.' )
        if "pre_norm_attn.1." in k:
            k_new = k_new.replace('pre_norm_attn.1.' , 'attention.' )
        if "pre_norm_ffn.0." in k:
            k_new = k_new.replace('pre_norm_ffn.0.' , 'layernorm_after.' )
        if "pre_norm_ffn.1." in k:
            k_new = k_new.replace('pre_norm_ffn.1.' , 'ffn.conv1.' )
        if "pre_norm_ffn.3." in k:
            k_new = k_new.replace('pre_norm_ffn.3.' , 'ffn.conv2.' )
        if "classifier.1." in k:
            k_new = k_new.replace('classifier.1.' , 'classifier.' )
        if "seg_head." in k:
            k_new = k_new.replace('seg_head.' , 'segmentation_head.' )
        if ".aspp_layer." in k:
            k_new = k_new.replace('.aspp_layer.' , '.' )
        if ".aspp_pool." in k:
            k_new = k_new.replace('.aspp_pool.' , '.' )
        rename_keys.append((k, k_new) )
    return rename_keys
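# Example of one (old, new) pair this function emits for the non-base variant
# (prefix 'mobilevitv2.'), assuming a typical checkpoint key:
#     ('conv_1.block.conv.weight', 'mobilevitv2.conv_stem.convolution.weight')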
def remove_unused_keys(state_dict):
    keys_to_ignore = []
    for k in state_dict.keys():
        if k.startswith('seg_head.aux_head.' ):
            keys_to_ignore.append(k )
    for k in keys_to_ignore:
        state_dict.pop(k , None )
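# The keys dropped above belong to the original model's auxiliary segmentation
# head (a training-time extra); the Hugging Face MobileViTV2 port defines no such
# module, so those weights would have nowhere to load.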
def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_mobilevitva_checkpoint(task_name, checkpoint_path, orig_config_path, pytorch_dump_folder_path):
    config = get_mobilevitva_config(task_name , orig_config_path )
    # load original state_dict
    checkpoint = torch.load(checkpoint_path , map_location='cpu' )
    # load huggingface model
    if task_name.startswith('ade20k_' ) or task_name.startswith('voc_' ):
        model = MobileViTVaForSemanticSegmentation(config ).eval()
        base_model = False
    else:
        model = MobileViTVaForImageClassification(config ).eval()
        base_model = False
    # remove and rename some keys of load the original model
    state_dict = checkpoint
    remove_unused_keys(state_dict )
    rename_keys = create_rename_keys(state_dict , base_model=base_model )
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict , rename_key_src , rename_key_dest )
    # load modified state_dict
    model.load_state_dict(state_dict )
    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 )
    encoding = image_processor(images=prepare_img() , return_tensors='pt' )
    outputs = model(**encoding )
    # verify classification model
    if task_name.startswith('imagenet' ):
        logits = outputs.logits
        predicted_class_idx = logits.argmax(-1 ).item()
        print('Predicted class:' , model.config.id2label[predicted_class_idx] )
        if task_name.startswith('imagenet1k_256' ) and config.width_multiplier == 1.0:
            # expected_logits for base variant
            expected_logits = torch.tensor([-1.6336E00, -7.3204E-02, -5.1883E-01] )
            assert torch.allclose(logits[0, :3] , expected_logits , atol=1E-4 )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(f"""Saving model {task_name} to {pytorch_dump_folder_path}""" )
    model.save_pretrained(pytorch_dump_folder_path )
    print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
    image_processor.save_pretrained(pytorch_dump_folder_path )
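# Typical invocation of this script (all paths below are placeholders):
#   python <this_script>.py --task imagenet1k_256 \
#       --orig_checkpoint_path ./mobilevitv2-1.0.pt \
#       --orig_config_path ./mobilevitv2.yaml \
#       --pytorch_dump_folder_path ./mobilevitv2-1.0-imagenet1k-256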
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--task""",
default="""imagenet1k_256""",
type=str,
help=(
"""Name of the task for which the MobileViTV2 model you'd like to convert is trained on . """
"""
Classification (ImageNet-1k)
- MobileViTV2 (256x256) : imagenet1k_256
- MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384
- MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :
imagenet21k_to_1k_256
- MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on
ImageNet-1k 384x384) : imagenet21k_to_1k_384
Segmentation
- ADE20K Dataset : ade20k_deeplabv3
- Pascal VOC 2012 Dataset: voc_deeplabv3
"""
),
choices=[
"""imagenet1k_256""",
"""imagenet1k_384""",
"""imagenet21k_to_1k_256""",
"""imagenet21k_to_1k_384""",
"""ade20k_deeplabv3""",
"""voc_deeplabv3""",
],
)
parser.add_argument(
"""--orig_checkpoint_path""", required=True, type=str, help="""Path to the original state dict (.pt file)."""
)
parser.add_argument("""--orig_config_path""", required=True, type=str, help="""Path to the original config file.""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, type=str, help="""Path to the output PyTorch model directory."""
)
    args = parser.parse_args()
convert_mobilevitva_checkpoint(
args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
    )
'''simple docstring'''
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def main():
    parser = HfArgumentParser(TensorFlowBenchmarkArguments )
    benchmark_args = parser.parse_args_into_dataclasses()[0]
    benchmark = TensorFlowBenchmark(args=benchmark_args )
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = """Arg --no_{0} is no longer used, please use --no-{0} instead."""
        begin_error_msg = """ """.join(str(e ).split(" " )[:-1] )
        full_error_msg = """"""
        depreciated_args = eval(str(e ).split(" " )[-1] )
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:] )
            else:
                wrong_args.append(arg )
        if len(wrong_args ) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args )
            raise ValueError(full_error_msg )
    benchmark.run()
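# Example of the translation performed above (flag name illustrative): a run
# invoked with a removed "--no_<feature>" switch is told to use "--no-<feature>"
# instead, while flags unknown to TensorFlowBenchmark are re-raised verbatim.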
if __name__ == "__main__":
main()
'''simple docstring'''
def gnome_sort( __lowerCamelCase : list ):
    """Sort a list in place with gnome sort and return it."""
    if len(__lowerCamelCase ) <= 1:
        return __lowerCamelCase
    i = 1
    while i < len(__lowerCamelCase ):
        if __lowerCamelCase[i - 1] <= __lowerCamelCase[i]:
            i += 1
        else:
            __lowerCamelCase[i - 1], __lowerCamelCase[i] = __lowerCamelCase[i], __lowerCamelCase[i - 1]
            i -= 1
            if i == 0:
                i = 1
    return __lowerCamelCase
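# Example: gnome_sort([34, 2, 10, -9]) -> [-9, 2, 10, 34]; worst case is O(n^2),
# but an already-sorted input is handled in O(n).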
if __name__ == "__main__":
    user_input = input('''Enter numbers separated by a comma:\n''').strip()
    unsorted = [int(item) for item in user_input.split(''',''')]
print(gnome_sort(unsorted))
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
__lowercase = logging.get_logger(__name__)
__lowercase = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
__lowercase = {
"""vocab_file""": {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/vocab.json""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/vocab.json""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/vocab.json""",
"""roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json""",
"""roberta-large-openai-detector""": (
"""https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json"""
),
},
"""merges_file""": {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/merges.txt""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/merges.txt""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/merges.txt""",
"""roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt""",
"""roberta-large-openai-detector""": (
"""https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt"""
),
},
"""tokenizer_file""": {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/tokenizer.json""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/tokenizer.json""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json""",
"""roberta-base-openai-detector""": (
"""https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json"""
),
"""roberta-large-openai-detector""": (
"""https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json"""
),
},
}
__lowercase = {
"""roberta-base""": 512,
"""roberta-large""": 512,
"""roberta-large-mnli""": 512,
"""distilroberta-base""": 512,
"""roberta-base-openai-detector""": 512,
"""roberta-large-openai-detector""": 512,
}
class RobertaTokenizerFast( PreTrainedTokenizerFast ):
"""simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["""input_ids""", """attention_mask"""]
    slow_tokenizer_class = RobertaTokenizer
    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, errors="replace", bos_token="<s>",
                 eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>",
                 mask_token="<mask>", add_prefix_space=False, trim_offsets=True, **kwargs, ):
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , errors=errors , bos_token=bos_token ,
            eos_token=eos_token , sep_token=sep_token , cls_token=cls_token , unk_token=unk_token ,
            pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space ,
            trim_offsets=trim_offsets , **kwargs , )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space" , add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers , pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer , tokenizer_component , None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())
            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])
            changes_to_apply = False
            if state.get("add_prefix_space" , add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets" , trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors , state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer , tokenizer_component , new_value)
@property
    def mask_token( self ) -> str:
if self._mask_token is None:
if self.verbose:
logger.error("Using mask_token, but it is not set yet.")
return None
return str(self._mask_token)
@mask_token.setter
    def mask_token( self , value ):
        value = AddedToken(value , lstrip=True , rstrip=False) if isinstance(value , str) else value
        self._mask_token = value
    def _batch_encode_plus( self , *args , **kwargs ) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words" , False)
        assert self.add_prefix_space or not is_split_into_words, (
            f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args , **kwargs)
    def _encode_plus( self , *args , **kwargs ) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words" , False)
        assert self.add_prefix_space or not is_split_into_words, (
            f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args , **kwargs)
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None):
        files = self._tokenizer.model.save(save_directory , name=filename_prefix)
        return tuple(files)
    def build_inputs_with_special_tokens( self , token_ids_a , token_ids_a=None ):
        output = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
        if token_ids_a is None:
            return output
        return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
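    # The resulting layout is <s> A </s> for a single sequence and
    # <s> A </s></s> B </s> for a pair, matching RoBERTa's pretraining format.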
    def create_token_type_ids_from_sequences( self , token_ids_a: List[int] , token_ids_a: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
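    # RoBERTa does not use token type ids, so the mask above is all zeros; it is
    # returned only so this tokenizer keeps the common API shape.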
"""simple docstring"""
import os
import sys
import unittest
__lowercase = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
__lowercase = os.path.join(git_repo_path, """src""", """diffusers""")
class _A ( unittest.TestCase ):
"""simple docstring"""
    def test_find_backend( self ):
        simple_backend = find_backend(" if not is_torch_available():")
        self.assertEqual(simple_backend , "torch")
        # backend_with_underscore = find_backend("    if not is_tensorflow_text_available():")
        # self.assertEqual(backend_with_underscore, "tensorflow_text")
        double_backend = find_backend(" if not (is_torch_available() and is_transformers_available()):")
        self.assertEqual(double_backend , "torch_and_transformers")
        # double_backend_with_underscore = find_backend(
        #     "    if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        # )
        # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")
        triple_backend = find_backend(
            " if not (is_torch_available() and is_transformers_available() and is_onnx_available()):")
        self.assertEqual(triple_backend , "torch_and_transformers_and_onnx")
    def test_read_init( self ):
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn("torch" , objects)
        self.assertIn("torch_and_transformers" , objects)
        self.assertIn("flax_and_transformers" , objects)
        self.assertIn("torch_and_transformers_and_onnx" , objects)
        # Likewise, we can't assert on the exact content of a key
        self.assertIn("UNet2DModel" , objects["torch"])
        self.assertIn("FlaxUNet2DConditionModel" , objects["flax"])
        self.assertIn("StableDiffusionPipeline" , objects["torch_and_transformers"])
        self.assertIn("FlaxStableDiffusionPipeline" , objects["flax_and_transformers"])
        self.assertIn("LMSDiscreteScheduler" , objects["torch_and_scipy"])
        self.assertIn("OnnxStableDiffusionPipeline" , objects["torch_and_transformers_and_onnx"])
    def test_create_dummy_object( self ):
        dummy_constant = create_dummy_object("CONSTANT" , "'torch'")
        self.assertEqual(dummy_constant , "\nCONSTANT = None\n")
        dummy_function = create_dummy_object("function" , "'torch'")
        self.assertEqual(
            dummy_function , "\ndef function(*args, **kwargs):\n    requires_backends(function, 'torch')\n")
        expected_dummy_class = "\nclass FakeClass(metaclass=DummyObject):\n    _backends = 'torch'\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, 'torch')\n\n    @classmethod\n    def from_config(cls, *args, **kwargs):\n        requires_backends(cls, 'torch')\n\n    @classmethod\n    def from_pretrained(cls, *args, **kwargs):\n        requires_backends(cls, 'torch')\n"
        dummy_class = create_dummy_object("FakeClass" , "'torch'")
        self.assertEqual(dummy_class , expected_dummy_class)
    def test_create_dummy_files( self ):
        expected_dummy_pytorch_file = "# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n    requires_backends(function, [\"torch\"])\n\n\nclass FakeClass(metaclass=DummyObject):\n    _backends = [\"torch\"]\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, [\"torch\"])\n\n    @classmethod\n    def from_config(cls, *args, **kwargs):\n        requires_backends(cls, [\"torch\"])\n\n    @classmethod\n    def from_pretrained(cls, *args, **kwargs):\n        requires_backends(cls, [\"torch\"])\n"
        dummy_files = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]})
        self.assertEqual(dummy_files["torch"] , expected_dummy_pytorch_file)
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class SegformerConfigTester( ConfigTester ):
'''simple docstring'''
    def create_and_test_config_common_properties( self ):
        config = self.config_class(**self.inputs_dict )
        self.parent.assertTrue(hasattr(config , '''hidden_sizes''' ) )
        self.parent.assertTrue(hasattr(config , '''num_attention_heads''' ) )
        self.parent.assertTrue(hasattr(config , '''num_encoder_blocks''' ) )
class SegformerModelTester:
'''simple docstring'''
    def __init__(self, parent, batch_size=13, image_size=64, num_channels=3, num_encoder_blocks=4,
                 depths=[2, 2, 2, 2], sr_ratios=[8, 4, 2, 1], hidden_sizes=[16, 32, 64, 1_28],
                 downsampling_rates=[1, 4, 8, 16], num_attention_heads=[1, 2, 4, 8], is_training=True,
                 use_labels=True, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
                 initializer_range=0.02, num_labels=3, scope=None, ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.sr_ratios = sr_ratios
        self.depths = depths
        self.hidden_sizes = hidden_sizes
        self.downsampling_rates = downsampling_rates
        self.num_attention_heads = num_attention_heads
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
    def prepare_config_and_inputs( self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self ):
return SegformerConfig(
image_size=self.image_size , num_channels=self.num_channels , num_encoder_blocks=self.num_encoder_blocks , depths=self.depths , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
    def create_and_check_model( self , config , pixel_values , labels ):
        model = SegformerModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        expected_height = expected_width = self.image_size // (self.downsampling_rates[-1] * 2)
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width) )
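    # With the tester defaults, the shape math above gives
    # image_size // (downsampling_rates[-1] * 2) = 64 // (16 * 2) = 2 for the final map.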
    def create_and_check_for_image_segmentation( self , config , pixel_values , labels ):
        config.num_labels = self.num_labels
        model = SegformerForSemanticSegmentation(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(
            result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(
            result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
        self.parent.assertGreater(result.loss , 0.0 )
    def create_and_check_for_binary_image_segmentation( self , config , pixel_values , labels ):
        config.num_labels = 1
        model = SegformerForSemanticSegmentation(config=config )
        model.to(torch_device )
        model.eval()
        labels = torch.randint(0 , 1 , (self.batch_size, self.image_size, self.image_size) ).to(torch_device )
        result = model(pixel_values , labels=labels )
        self.parent.assertGreater(result.loss , 0.0 )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class SegformerModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (
(
SegformerModel,
SegformerForSemanticSegmentation,
SegformerForImageClassification,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": SegformerModel,
"image-classification": SegformerForImageClassification,
"image-segmentation": SegformerForSemanticSegmentation,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    test_head_masking = False
    test_pruning = False
    test_resize_embeddings = False
    def setUp( self ):
        self.model_tester = SegformerModelTester(self )
        self.config_tester = SegformerConfigTester(self , config_class=SegformerConfig )
    def test_config( self ):
self.config_tester.run_common_tests()
    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_binary_image_segmentation( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_binary_image_segmentation(*config_and_inputs )
    def test_image_segmentation( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_segmentation(*config_and_inputs )
@unittest.skip('''SegFormer does not use inputs_embeds''' )
    def test_inputs_embeds( self ):
pass
@unittest.skip('''SegFormer does not have get_input_embeddings method and get_output_embeddings methods''' )
    def test_model_common_attributes( self ):
pass
    def test_forward_signature( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_attention_outputs( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        for model_class in self.all_model_classes:
            inputs_dict['''output_attentions'''] = True
            inputs_dict['''output_hidden_states'''] = False
            config.return_dict = True
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            attentions = outputs.attentions
            expected_num_attentions = sum(self.model_tester.depths )
            self.assertEqual(len(attentions ) , expected_num_attentions )
            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            attentions = outputs.attentions
            self.assertEqual(len(attentions ) , expected_num_attentions )
            # verify the first attentions (first block, first layer)
            expected_seq_len = (self.model_tester.image_size // 4) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
            # verify the last attentions (last block, last layer)
            expected_seq_len = (self.model_tester.image_size // 32) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
            self.assertListEqual(
                list(attentions[-1].shape[-3:] ) , [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len] , )
            out_len = len(outputs )
            # Check attention is always last and order is fine
            inputs_dict['''output_attentions'''] = True
            inputs_dict['''output_hidden_states'''] = True
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            self.assertEqual(out_len + 1 , len(outputs ) )
            self_attentions = outputs.attentions
            self.assertEqual(len(self_attentions ) , expected_num_attentions )
            # verify the first attentions (first block, first layer)
            expected_seq_len = (self.model_tester.image_size // 4) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
    def test_hidden_states_output( self ):
        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            hidden_states = outputs.hidden_states
            expected_num_layers = self.model_tester.num_encoder_blocks
            self.assertEqual(len(hidden_states ) , expected_num_layers )
            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:] ) , [
                    self.model_tester.hidden_sizes[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ] , )
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict['''output_hidden_states'''] = True
            check_hidden_states_output(inputs_dict , config , model_class )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict , config , model_class )
    def test_training( self ):
        if not self.model_tester.is_training:
            return
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING ):
                continue
            model = model_class(config )
            model.to(torch_device )
            model.train()
            inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            loss = model(**inputs ).loss
            loss.backward()
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
    def test_model_is_small( self ):
pass
@slow
    def test_model_from_pretrained( self ):
        for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SegformerModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img():
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_torch
class SegformerModelIntegrationTest( unittest.TestCase ):
'''simple docstring'''
@slow
    def test_inference_image_segmentation_ade( self ):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(5_12, 5_12) , keep_ratio=False , align=False , do_random_crop=False )
        model = SegformerForSemanticSegmentation.from_pretrained('''nvidia/segformer-b0-finetuned-ade-512-512''' ).to(
            torch_device )
        image = prepare_img()
        encoded_inputs = image_processor(images=image , return_tensors='''pt''' )
        pixel_values = encoded_inputs.pixel_values.to(torch_device )
        with torch.no_grad():
            outputs = model(pixel_values )
        expected_shape = torch.Size((1, model.config.num_labels, 1_28, 1_28) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor(
            [
                [[-4.6_310, -5.5_232, -6.2_356], [-5.1_921, -6.1_444, -6.5_996], [-5.4_424, -6.2_790, -6.7_574]],
                [[-12.1_391, -13.3_122, -13.9_554], [-12.8_732, -13.9_352, -14.3_563], [-12.9_438, -13.8_226, -14.2_513]],
                [[-12.5_134, -13.4_686, -14.4_915], [-12.8_669, -14.4_343, -14.7_758], [-13.2_523, -14.5_819, -15.0_694]],
            ] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , expected_slice , atol=1E-4 ) )
@slow
    def test_inference_image_segmentation_city( self ):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(5_12, 5_12) , keep_ratio=False , align=False , do_random_crop=False )
        model = SegformerForSemanticSegmentation.from_pretrained(
            '''nvidia/segformer-b1-finetuned-cityscapes-1024-1024''' ).to(torch_device )
        image = prepare_img()
        encoded_inputs = image_processor(images=image , return_tensors='''pt''' )
        pixel_values = encoded_inputs.pixel_values.to(torch_device )
        with torch.no_grad():
            outputs = model(pixel_values )
        expected_shape = torch.Size((1, model.config.num_labels, 1_28, 1_28) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor(
            [
                [[-13.5_748, -13.9_111, -12.6_500], [-14.3_500, -15.3_683, -14.2_328], [-14.7_532, -16.0_424, -15.6_087]],
                [[-17.1_651, -15.8_725, -12.9_653], [-17.2_580, -17.3_718, -14.8_223], [-16.6_058, -16.8_783, -16.7_452]],
                [[-3.6_456, -3.0_209, -1.4_203], [-3.0_797, -3.1_959, -2.0_000], [-1.8_757, -1.9_217, -1.6_997]],
            ] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , expected_slice , atol=1E-1 ) )
@slow
    def test_post_processing_semantic_segmentation( self ):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(5_12, 5_12) , keep_ratio=False , align=False , do_random_crop=False )
        model = SegformerForSemanticSegmentation.from_pretrained('''nvidia/segformer-b0-finetuned-ade-512-512''' ).to(
            torch_device )
        image = prepare_img()
        encoded_inputs = image_processor(images=image , return_tensors='''pt''' )
        pixel_values = encoded_inputs.pixel_values.to(torch_device )
        with torch.no_grad():
            outputs = model(pixel_values )
        outputs.logits = outputs.logits.detach().cpu()
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs , target_sizes=[(5_00, 3_00)] )
        expected_shape = torch.Size((5_00, 3_00) )
        self.assertEqual(segmentation[0].shape , expected_shape )
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs )
        expected_shape = torch.Size((1_28, 1_28) )
        self.assertEqual(segmentation[0].shape , expected_shape )
import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class BackboneUtilsTester( unittest.TestCase ):
'''simple docstring'''
    def test_get_aligned_output_features_output_indices( self ):
        stage_names = ['''a''', '''b''', '''c''']
        # Defaults to last layer if both are None
        out_features , out_indices = get_aligned_output_features_output_indices(None , None , stage_names )
        self.assertEqual(out_features , ['''c'''] )
        self.assertEqual(out_indices , [2] )
        # Out indices set to match out features
        out_features , out_indices = get_aligned_output_features_output_indices(['''a''', '''c'''] , None , stage_names )
        self.assertEqual(out_features , ['''a''', '''c'''] )
        self.assertEqual(out_indices , [0, 2] )
        # Out features set to match out indices
        out_features , out_indices = get_aligned_output_features_output_indices(None , [0, 2] , stage_names )
        self.assertEqual(out_features , ['''a''', '''c'''] )
        self.assertEqual(out_indices , [0, 2] )
        # Out features selected from negative indices
        out_features , out_indices = get_aligned_output_features_output_indices(None , [-3, -1] , stage_names )
        self.assertEqual(out_features , ['''a''', '''c'''] )
        self.assertEqual(out_indices , [-3, -1] )
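    # Negative indices are kept as-is: for a three-stage backbone, [-3, -1]
    # selects the same stages as [0, 2], so both spellings round-trip unchanged.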
    def test_verify_out_features_out_indices( self ):
        # Stage names must be set
        with self.assertRaises(ValueError ):
            verify_out_features_out_indices(['''a''', '''b'''] , (0, 1) , None )
        # Out features must be a list
        with self.assertRaises(ValueError ):
            verify_out_features_out_indices(('''a''', '''b''') , (0, 1) , ['''a''', '''b'''] )
        # Out features must be a subset of stage names
        with self.assertRaises(ValueError ):
            verify_out_features_out_indices(['''a''', '''b'''] , (0, 1) , ['''a'''] )
        # Out indices must be a list or tuple
        with self.assertRaises(ValueError ):
            verify_out_features_out_indices(None , 0 , ['''a''', '''b'''] )
        # Out indices must be a subset of stage names
        with self.assertRaises(ValueError ):
            verify_out_features_out_indices(None , (0, 1) , ['''a'''] )
        # Out features and out indices must be the same length
        with self.assertRaises(ValueError ):
            verify_out_features_out_indices(['''a''', '''b'''] , (0,) , ['''a''', '''b''', '''c'''] )
        # Out features should match out indices
        with self.assertRaises(ValueError ):
            verify_out_features_out_indices(['''a''', '''b'''] , (0, 2) , ['''a''', '''b''', '''c'''] )
        # Out features and out indices should be in order
        with self.assertRaises(ValueError ):
            verify_out_features_out_indices(['''b''', '''a'''] , (0, 1) , ['''a''', '''b'''] )
        # Check passes with valid inputs
        verify_out_features_out_indices(['''a''', '''b''', '''d'''] , (0, 1, -1) , ['''a''', '''b''', '''c''', '''d'''] )
    def test_backbone_mixin( self ):
        backbone = BackboneMixin()
        backbone.stage_names = ['''a''', '''b''', '''c''']
        backbone._out_features = ['''a''', '''c''']
        backbone._out_indices = [0, 2]
        # Check that the output features and indices are set correctly
        self.assertEqual(backbone.out_features , ['''a''', '''c'''] )
        self.assertEqual(backbone.out_indices , [0, 2] )
        # Check out features and indices are updated correctly
        backbone.out_features = ['''a''', '''b''']
        self.assertEqual(backbone.out_features , ['''a''', '''b'''] )
        self.assertEqual(backbone.out_indices , [0, 1] )
        backbone.out_indices = [-3, -1]
        self.assertEqual(backbone.out_features , ['''a''', '''c'''] )
        self.assertEqual(backbone.out_indices , [-3, -1] )
'''simple docstring'''
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
from transformers.models.esm.modeling_esm import (
ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
EsmEmbeddings,
create_position_ids_from_input_ids,
)
class EsmModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=False, use_input_mask=True,
                 use_token_type_ids=False, use_labels=True, vocab_size=33, hidden_size=32, num_hidden_layers=5,
                 num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1,
                 attention_probs_dropout_prob=0.1, max_position_embeddings=5_12, type_vocab_size=16,
                 type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config( self ):
return EsmConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
    def create_and_check_model( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = EsmModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask )
        result = model(input_ids )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
    def create_and_check_for_masked_lm( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = EsmForMaskedLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_for_token_classification( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = EsmForTokenClassification(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_torch
class EsmModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    test_mismatched_shapes = False
    all_model_classes = (
(
EsmForMaskedLM,
EsmModel,
EsmForSequenceClassification,
EsmForTokenClassification,
)
if is_torch_available()
else ()
)
    all_generative_model_classes = ()
    pipeline_model_mapping = (
{
"""feature-extraction""": EsmModel,
"""fill-mask""": EsmForMaskedLM,
"""text-classification""": EsmForSequenceClassification,
"""token-classification""": EsmForTokenClassification,
"""zero-shot""": EsmForSequenceClassification,
}
if is_torch_available()
else {}
)
    test_sequence_classification_problem_types = True
    def setUp( self ):
        self.model_tester = EsmModelTester(self )
        self.config_tester = ConfigTester(self , config_class=EsmConfig , hidden_size=37 )
    def test_config( self ):
self.config_tester.run_common_tests()
    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_model_various_embeddings( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_masked_lm( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )
    def test_for_token_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )
@slow
    def test_model_from_pretrained( self ):
        for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = EsmModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
    def test_create_position_ids_respects_padding_index( self ):
        config = self.model_tester.prepare_config_and_inputs()[0]
        model = EsmEmbeddings(config=config )
        input_ids = torch.as_tensor([[12, 31, 13, model.padding_idx]] )
        expected_positions = torch.as_tensor(
            [
                [
                    0 + model.padding_idx + 1,
                    1 + model.padding_idx + 1,
                    2 + model.padding_idx + 1,
                    model.padding_idx,
                ]
            ] )
        position_ids = create_position_ids_from_input_ids(input_ids , model.padding_idx )
        self.assertEqual(position_ids.shape , expected_positions.shape )
        self.assertTrue(torch.all(torch.eq(position_ids , expected_positions ) ) )
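    # ESM follows fairseq's convention: non-padding tokens are numbered starting
    # at padding_idx + 1 while padding positions keep padding_idx, which is
    # exactly what the expected tensor above encodes.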
    def test_create_position_ids_from_inputs_embeds( self ):
        config = self.model_tester.prepare_config_and_inputs()[0]
        embeddings = EsmEmbeddings(config=config )
        inputs_embeds = torch.empty(2 , 4 , 30 )
        expected_single_positions = [
            0 + embeddings.padding_idx + 1,
            1 + embeddings.padding_idx + 1,
            2 + embeddings.padding_idx + 1,
            3 + embeddings.padding_idx + 1,
        ]
        expected_positions = torch.as_tensor([expected_single_positions, expected_single_positions] )
        position_ids = embeddings.create_position_ids_from_inputs_embeds(inputs_embeds )
        self.assertEqual(position_ids.shape , expected_positions.shape )
        self.assertTrue(torch.all(torch.eq(position_ids , expected_positions ) ) )
@unittest.skip('''Esm does not support embedding resizing''' )
    def test_resize_embeddings_untied( self ):
pass
@unittest.skip('''Esm does not support embedding resizing''' )
    def test_resize_tokens_embeddings( self ):
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
    def test_model_is_small( self ):
pass
@require_torch
class EsmModelIntegrationTest( TestCasePlus ):
@slow
    def test_inference_masked_lm( self ):
        with torch.no_grad():
            model = EsmForMaskedLM.from_pretrained('''facebook/esm2_t6_8M_UR50D''' )
            model.eval()
            input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]] )
            output = model(input_ids )[0]
            vocab_size = 33
            expected_shape = torch.Size((1, 6, vocab_size) )
            self.assertEqual(output.shape , expected_shape )
            expected_slice = torch.tensor(
                [[[8.9215, -10.5898, -6.4671], [-6.3967, -13.9114, -1.1212], [-7.7812, -13.9516, -3.7406]]] )
            self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=1e-4 ) )
@slow
    def test_inference_no_head( self ):
        with torch.no_grad():
            model = EsmModel.from_pretrained('''facebook/esm2_t6_8M_UR50D''' )
            model.eval()
            input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
            output = model(input_ids )[0]
            # compare the actual values for a slice.
            expected_slice = torch.tensor(
                [[[0.1444, 0.5413, 0.3248], [0.3034, 0.0053, 0.3108], [0.3228, -0.2499, 0.3415]]] )
            self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=1e-4 ) )
'''Tests for the CANINE tokenizer.'''
import json
import os
import shutil
import tempfile
import unittest
from transformers import BatchEncoding, CanineTokenizer
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.tokenization_utils import AddedToken
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
class CanineTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CanineTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        tokenizer = CanineTokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def canine_tokenizer(self):
        return CanineTokenizer.from_pretrained("google/canine-s")
    def get_tokenizer(self, **kwargs) -> CanineTokenizer:
        tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
        tokenizer._unicode_vocab_size = 1024
        return tokenizer
    @require_torch
    def test_prepare_batch_integration(self):
        tokenizer = self.canine_tokenizer
        src_text = ["Life is like a box of chocolates.", "You never know what you're gonna get."]
        # fmt: off
        expected_src_tokens = [57344, 76, 105, 102, 101, 32, 105, 115, 32, 108, 105, 107, 101, 32, 97, 32, 98, 111, 120, 32, 111, 102, 32, 99, 104, 111, 99, 111, 108, 97, 116, 101, 115, 46, 57345, 0, 0, 0, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors="pt")
        self.assertIsInstance(batch, BatchEncoding)
        result = list(batch.input_ids.numpy()[0])
        self.assertListEqual(expected_src_tokens, result)
        self.assertEqual((2, 39), batch.input_ids.shape)
        self.assertEqual((2, 39), batch.attention_mask.shape)
    @require_torch
    def test_encoding_keys(self):
        tokenizer = self.canine_tokenizer
        src_text = ["Once there was a man.", "He wrote a test in HuggingFace Transformers."]
        batch = tokenizer(src_text, padding=True, return_tensors="pt")
        # check if input_ids, attention_mask and token_type_ids are returned
        self.assertIn("input_ids", batch)
        self.assertIn("attention_mask", batch)
        self.assertIn("token_type_ids", batch)
    @require_torch
    def test_max_length_integration(self):
        tokenizer = self.canine_tokenizer
        tgt_text = [
            "What's the weather?",
            "It's about 25 degrees.",
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors="pt"
        )
        self.assertEqual(32, targets["input_ids"].shape[1])
    def test_save_and_load_tokenizer(self):
        # safety check on max_len default value so we are sure the test works
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                self.assertNotEqual(tokenizer.model_max_length, 42)
        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()
                sample_text = " He is very happy, UNwant\u00E9d,running"
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)
                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                shutil.rmtree(tmpdirname)
        tokenizers = self.get_tokenizers(model_max_length=42)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()
                sample_text = " He is very happy, UNwant\u00E9d,running"
                additional_special_tokens = tokenizer.additional_special_tokens
                # We can add a new special token for Canine as follows:
                new_additional_special_token = chr(0xE007)
                additional_special_tokens.append(new_additional_special_token)
                tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens})
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)
                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                self.assertIn(new_additional_special_token, after_tokenizer.additional_special_tokens)
                self.assertEqual(after_tokenizer.model_max_length, 42)
                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
                self.assertEqual(tokenizer.model_max_length, 43)
                shutil.rmtree(tmpdirname)
    def test_add_special_tokens(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                input_text, ids = self.get_clean_sequence(tokenizer)
                # a special token for Canine can be defined as follows:
                SPECIAL_TOKEN = 0xE005
                special_token = chr(SPECIAL_TOKEN)
                tokenizer.add_special_tokens({"cls_token": special_token})
                encoded_special_token = tokenizer.encode(special_token, add_special_tokens=False)
                self.assertEqual(len(encoded_special_token), 1)
                text = tokenizer.decode(ids + encoded_special_token, clean_up_tokenization_spaces=False)
                encoded = tokenizer.encode(text, add_special_tokens=False)
                input_encoded = tokenizer.encode(input_text, add_special_tokens=False)
                special_token_id = tokenizer.encode(special_token, add_special_tokens=False)
                self.assertEqual(encoded, input_encoded + special_token_id)
                decoded = tokenizer.decode(encoded, skip_special_tokens=True)
                self.assertTrue(special_token not in decoded)
    def test_tokenize_special_tokens(self):
        tokenizers = self.get_tokenizers(do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                SPECIAL_TOKEN_1 = chr(0xE005)
                SPECIAL_TOKEN_2 = chr(0xE006)
                # `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py)
                tokenizer.add_tokens([SPECIAL_TOKEN_1], special_tokens=True)
                # `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`,
                # which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py)
                tokenizer.add_special_tokens({"additional_special_tokens": [SPECIAL_TOKEN_2]})
                token_1 = tokenizer.tokenize(SPECIAL_TOKEN_1)
                token_2 = tokenizer.tokenize(SPECIAL_TOKEN_2)
                self.assertEqual(len(token_1), 1)
                self.assertEqual(len(token_2), 1)
                self.assertEqual(token_1[0], SPECIAL_TOKEN_1)
                self.assertEqual(token_2[0], SPECIAL_TOKEN_2)
    @require_tokenizers
    def test_added_token_serializable(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # a special token for Canine can be defined as follows:
                NEW_TOKEN = 0xE006
                new_token = chr(NEW_TOKEN)
                new_token = AddedToken(new_token, lstrip=True)
                tokenizer.add_special_tokens({"additional_special_tokens": [new_token]})
                with tempfile.TemporaryDirectory() as tmp_dir_name:
                    tokenizer.save_pretrained(tmp_dir_name)
                    tokenizer.from_pretrained(tmp_dir_name)
    def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))
        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)
                with open(os.path.join(tmp_dir, "special_tokens_map.json"), encoding="utf-8") as json_file:
                    special_tokens_map = json.load(json_file)
                with open(os.path.join(tmp_dir, "tokenizer_config.json"), encoding="utf-8") as json_file:
                    tokenizer_config = json.load(json_file)
                # a special token for Canine can be defined as follows:
                NEW_TOKEN = 0xE006
                new_token_1 = chr(NEW_TOKEN)
                special_tokens_map["additional_special_tokens"] = [new_token_1]
                tokenizer_config["additional_special_tokens"] = [new_token_1]
                with open(os.path.join(tmp_dir, "special_tokens_map.json"), "w", encoding="utf-8") as outfile:
                    json.dump(special_tokens_map, outfile)
                with open(os.path.join(tmp_dir, "tokenizer_config.json"), "w", encoding="utf-8") as outfile:
                    json.dump(tokenizer_config, outfile)
                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(tmp_dir, extra_ids=0)
                self.assertIn(new_token_1, tokenizer_without_change_in_init.additional_special_tokens)
                # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
                self.assertEqual(
                    [new_token_1],
                    tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_1])
                    ),
                )
                NEW_TOKEN = 0xE007
                new_token_2 = chr(NEW_TOKEN)
                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = [AddedToken(new_token_2, lstrip=True)]
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir, additional_special_tokens=new_added_tokens, extra_ids=0
                )
                self.assertIn(new_token_2, tokenizer.additional_special_tokens)
                # self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab
                self.assertEqual(
                    [new_token_2], tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_2]))
                )
    @require_tokenizers
    def test_encode_decode_with_spaces(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                input = "hello world"
                if self.space_between_special_tokens:
                    output = "[CLS] hello world [SEP]"
                else:
                    output = input
                encoded = tokenizer.encode(input, add_special_tokens=False)
                decoded = tokenizer.decode(encoded, spaces_between_special_tokens=self.space_between_special_tokens)
                self.assertIn(decoded, [output, output.lower()])
    def test_tokenizers_common_ids_setters(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                attributes_list = [
                    "bos_token",
                    "eos_token",
                    "unk_token",
                    "sep_token",
                    "pad_token",
                    "cls_token",
                    "mask_token",
                ]
                token_to_test_setters = "a"
                token_id_to_test_setters = ord(token_to_test_setters)
                for attr in attributes_list:
                    setattr(tokenizer, attr + "_id", None)
                    self.assertEqual(getattr(tokenizer, attr), None)
                    self.assertEqual(getattr(tokenizer, attr + "_id"), None)
                    setattr(tokenizer, attr + "_id", token_id_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr), token_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr + "_id"), token_id_to_test_setters)
                setattr(tokenizer, "additional_special_tokens_ids", [])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [])
                additional_special_token_id = 0xE006
                additional_special_token = chr(additional_special_token_id)
                setattr(tokenizer, "additional_special_tokens_ids", [additional_special_token_id])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [additional_special_token])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [additional_special_token_id])
def __magic_name__ ( self : str ) -> Dict:
pass
def __magic_name__ ( self : List[Any] ) -> List[Any]:
pass
def __magic_name__ ( self : Any ) -> int:
pass
def __magic_name__ ( self : Union[str, Any] ) -> Union[str, Any]:
pass
def __magic_name__ ( self : List[Any] ) -> Optional[int]:
pass
def __magic_name__ ( self : Tuple ) -> Optional[Any]:
pass
def __magic_name__ ( self : Dict ) -> Dict:
pass
def __magic_name__ ( self : List[str] ) -> Dict:
        pass
''' Fine-tuning the library models for named entity recognition.'''
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)


@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    task_type: Optional[str] = field(
        default="NER", metadata={"help": "Task type to fine tune in training (e.g. NER, POS, etc)"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}
    )


@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task."}
    )
    labels: Optional[str] = field(
        default=None, metadata={"help": "Path to a file containing all labels. If not specified, CoNLL-2003 labels are used."}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(default=False, metadata={"help": "Overwrite the cached training and evaluation sets"})
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )
    module = import_module("tasks")
    try:
        token_classification_task_clazz = getattr(module, model_args.task_type)
        token_classification_task: TokenClassificationTask = token_classification_task_clazz()
    except AttributeError:
        raise ValueError(
            f"Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. "
            f"Available tasks classes are: {TokenClassificationTask.__subclasses__()}"
        )
    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)
    # Set seed
    set_seed(training_args.seed)
    # Prepare the CoNLL-2003 task
    labels = token_classification_task.get_labels(data_args.labels)
    label_map: Dict[int, str] = dict(enumerate(labels))
    num_labels = len(labels)
    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        id2label=label_map,
        label2id={label: i for i, label in enumerate(labels)},
        cache_dir=model_args.cache_dir,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast,
    )
    model = AutoModelForTokenClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
    )
    # Get datasets
    train_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task,
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            labels=labels,
            model_type=config.model_type,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.train,
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task,
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            labels=labels,
            model_type=config.model_type,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.dev,
        )
        if training_args.do_eval
        else None
    )

    def align_predictions(predictions: np.ndarray, label_ids: np.ndarray) -> Tuple[List[int], List[int]]:
        preds = np.argmax(predictions, axis=2)
        batch_size, seq_len = preds.shape
        out_label_list = [[] for _ in range(batch_size)]
        preds_list = [[] for _ in range(batch_size)]
        for i in range(batch_size):
            for j in range(seq_len):
                if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
                    out_label_list[i].append(label_map[label_ids[i][j]])
                    preds_list[i].append(label_map[preds[i][j]])
        return preds_list, out_label_list

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds_list, out_label_list = align_predictions(p.predictions, p.label_ids)
        return {
            "accuracy_score": accuracy_score(out_label_list, preds_list),
            "precision": precision_score(out_label_list, preds_list),
            "recall": recall_score(out_label_list, preds_list),
            "f1": f1_score(out_label_list, preds_list),
        }

    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None
    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
        data_collator=data_collator,
    )
    # Training
    if training_args.do_train:
        trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
        )
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_process_zero():
            tokenizer.save_pretrained(training_args.output_dir)
    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        if trainer.is_world_process_zero():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in result.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))
            results.update(result)
    # Predict
    if training_args.do_predict:
        test_dataset = TokenClassificationDataset(
            token_classification_task=token_classification_task,
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            labels=labels,
            model_type=config.model_type,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.test,
        )
        predictions, label_ids, metrics = trainer.predict(test_dataset)
        preds_list, _ = align_predictions(predictions, label_ids)
        output_test_results_file = os.path.join(training_args.output_dir, "test_results.txt")
        if trainer.is_world_process_zero():
            with open(output_test_results_file, "w") as writer:
                for key, value in metrics.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))
        # Save predictions
        output_test_predictions_file = os.path.join(training_args.output_dir, "test_predictions.txt")
        if trainer.is_world_process_zero():
            with open(output_test_predictions_file, "w") as writer:
                with open(os.path.join(data_args.data_dir, "test.txt"), "r") as f:
                    token_classification_task.write_predictions_to_file(writer, f, preds_list)
    return results


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
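

# Example invocation (paths and arguments are illustrative, not from the original
# script):
#
#   python run_ner.py \
#     --model_name_or_path bert-base-cased \
#     --data_dir ./data \
#     --labels ./data/labels.txt \
#     --output_dir ./ner-out \
#     --num_train_epochs 3 \
#     --do_train --do_eval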
'''Tests for the ONNX Stable Diffusion image-to-image pipeline.'''
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
    OnnxStableDiffusionImg2ImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionImg2ImgPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    hub_checkpoint = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"

    def get_dummy_inputs(self, seed=0):
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = np.random.RandomState(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "strength": 0.75,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_pipeline_default_ddim(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.69643, 0.58484, 0.50314, 0.58760, 0.55368, 0.59643, 0.51529, 0.41217, 0.49087])
        assert np.abs(image_slice - expected_slice).max() < 1e-1

    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.61737, 0.54642, 0.53183, 0.54465, 0.52742, 0.60525, 0.49969, 0.40655, 0.48154])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_lms(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        # warmup pass to apply optimizations
        _ = pipe(**self.get_dummy_inputs())
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52761, 0.59977, 0.49033, 0.49619, 0.54282, 0.50311, 0.47600, 0.40918, 0.45203])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65331, 0.58277, 0.48204, 0.56059, 0.53665, 0.56235, 0.50969, 0.40009, 0.46552])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionImg2ImgPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)
        prompt = "A fantasy landscape, trending on artstation"
        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]
        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx"
        )
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            scheduler=lms_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)
        prompt = "A fantasy landscape, trending on artstation"
        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]
        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
from math import isqrt
def is_prime(number: int) -> bool:
    """Checks primality by trial division up to the integer square root."""
    return all(number % divisor != 0 for divisor in range(2, isqrt(number) + 1))


def solution(max_prime: int = 10**6) -> int:
    """Counts primes below `max_prime` that are a difference of two consecutive cubes."""
    primes_count = 0
    cube_index = 1
    prime_candidate = 7  # (1 + 1) ** 3 - 1 ** 3, the first consecutive-cube difference
    while prime_candidate < max_prime:
        primes_count += is_prime(prime_candidate)
        cube_index += 1
        prime_candidate += 6 * cube_index  # the next difference 3k^2 + 3k + 1 grows by 6k
    return primes_count
if __name__ == "__main__":
print(F"""{solution() = }""")
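    # Quick self-check (illustrative, not part of the original solution): the
    # candidates enumerated by `solution` are the consecutive-cube differences
    # (k + 1) ** 3 - k ** 3 = 3 * k * k + 3 * k + 1, i.e. 7, 19, 37, 61, ...
    assert [(k + 1) ** 3 - k**3 for k in range(1, 5)] == [7, 19, 37, 61]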
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json",
},
"merges_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/bart-base": 1_024,
"facebook/bart-large": 1_024,
"facebook/bart-large-mnli": 1_024,
"facebook/bart-large-cnn": 1_024,
"facebook/bart-large-xsum": 1_024,
"yjernite/bart_eli5": 1_024,
}
@lru_cache()
def bytes_to_unicode():
    """
    Returns a mapping from utf-8 bytes to unicode strings. Whitespace/control bytes are remapped so that every byte
    has a printable representation the BPE code can work on.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word, where a word is a tuple of symbols."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
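

# For example (illustrative, not from the original module):
# get_pairs(("h", "e", "l", "l", "o")) returns
# {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")} — the candidate merges that
# the BPE loop below ranks against `bpe_ranks`.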
class BartTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file, merges_file, errors="replace", bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", add_prefix_space=False, **kwargs):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            errors=errors, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, **kwargs,
        )
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(R"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)
        if not pairs:
            return token
        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(F'Vocabulary path ({save_directory}) should be a directory')
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        F'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return vocab_file, merge_file

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
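

if __name__ == "__main__":
    # Hypothetical smoke test (not part of the original module): exercise the
    # byte-level helpers without needing vocab/merges files on disk.
    byte_encoder = bytes_to_unicode()
    assert len(byte_encoder) == 256  # every byte maps to a printable unicode character
    assert get_pairs(("h", "e", "l", "l", "o")) == {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}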
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'microsoft/beit-base-patch16-224-pt22k': (
'https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json'
),
# See all BEiT models at https://huggingface.co/models?filter=beit
}
class BeitConfig(PretrainedConfig):
    model_type = "beit"

    def __init__(self, vocab_size=8_192, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3_072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1E-12, image_size=224, patch_size=16, num_channels=3, use_mask_token=False, use_absolute_position_embeddings=False, use_relative_position_bias=False, use_shared_relative_position_bias=False, layer_scale_init_value=0.1, drop_path_rate=0.1, use_mean_pooling=True, out_indices=[3, 5, 7, 11], pool_scales=[1, 2, 3, 6], use_auxiliary_head=True, auxiliary_loss_weight=0.4, auxiliary_channels=256, auxiliary_num_convs=1, auxiliary_concat_input=False, semantic_loss_ignore_index=255, **kwargs):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class BeitOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1E-4
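

# A minimal usage sketch (illustrative, not from the original module):
#
#     config = BeitConfig(image_size=384)   # override any of the defaults above
#     onnx_config = BeitOnnxConfig(config)  # OnnxConfig is constructed from a model config
#     list(onnx_config.inputs)              # -> ["pixel_values"]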
import os
import numpy
import onnx
def _is_equal_tensor_proto(a, b):
    name_a = a.name
    name_b = b.name
    # names are ignored during the comparison, then restored
    a.name = ""
    b.name = ""
    res = a == b
    a.name = name_a
    b.name = name_b
    return res


def _node_replace_input_with(node_proto, name, new_name):
    for i, input_name in enumerate(node_proto.input):
        if input_name == name:
            node_proto.input.insert(i, new_name)
            node_proto.input.pop(i + 1)
    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
        _graph_replace_input_with(node_proto.attribute[1].g, name, new_name)
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)


def _graph_replace_input_with(graph_proto, name, new_name):
    for n in graph_proto.node:
        _node_replace_input_with(n, name, new_name)


def _remove_dup_initializers_from_model(model, model_without_ext, ind_to_replace):
    inits_with_data = list(model.graph.initializer)
    inits = list(model_without_ext.graph.initializer)
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i
        name_i = inits[i].name
        name_ref = inits[ref_i].name
        model_without_ext.graph.initializer.remove(inits[i])
        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph, name_i, name_ref)


def remove_dup_initializers(onnx_file_path):
    """
    Removes duplicate initializers from an ONNX model and saves an "optimized_" copy next to it.
    """
    model_file_folder = os.path.dirname(onnx_file_path)
    model_file_name = os.path.basename(onnx_file_path)
    model = onnx.load(os.path.join(model_file_folder, model_file_name))
    inits = list(model.graph.initializer)
    dup_set = set()
    dup_map = {}
    ind_to_replace = []
    total_reduced_size = 0
    for i in range(len(inits)):
        if i in dup_set:
            continue
        for j in range(i + 1, len(inits)):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i], inits[j]):
                dup_set.add(i)
                dup_set.add(j)
                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims)
                if dtype == 1:  # FLOAT
                    mem_size *= 4
                elif dtype == 6:  # INT32
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:  # INT64 or DOUBLE
                    mem_size *= 8
                else:
                    print("unexpected data type: ", dtype)
                total_reduced_size += mem_size
                name_i = inits[i].name
                name_j = inits[j].name
                if name_i in dup_map:
                    dup_map[name_i].append(name_j)
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i))
    print("total reduced size: ", total_reduced_size / 1024 / 1024 / 1024, "GB")
    ind_to_replace = sorted(ind_to_replace)
    _remove_dup_initializers_from_model(model, model, ind_to_replace)
    new_model = "optimized_" + model_file_name
    new_model = os.path.join(model_file_folder, new_model)
    onnx.save(model, new_model)
    return new_model
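

if __name__ == "__main__":
    # Hypothetical CLI entry point (not part of the original script): pass the
    # path of an exported ONNX file to write an "optimized_" copy next to it.
    import sys

    print(remove_dup_initializers(sys.argv[1]))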
'''Functions and classes related to optimization (weight updates) for TensorFlow.'''
import re
from typing import Callable, List, Optional, Union
import tensorflow as tf
try:
from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
from tensorflow.keras.optimizers import Adam
class WarmUp(tf.keras.optimizers.schedules.LearningRateSchedule):
    """Applies a warmup schedule on a given learning rate decay schedule."""

    def __init__(
        self,
        initial_learning_rate: float,
        decay_schedule_fn: Callable,
        warmup_steps: int,
        power: float = 1.0,
        name: str = None,
    ):
        super().__init__()
        self.initial_learning_rate = initial_learning_rate
        self.warmup_steps = warmup_steps
        self.power = power
        self.decay_schedule_fn = decay_schedule_fn
        self.name = name

    def __call__(self, step):
        with tf.name_scope(self.name or "WarmUp") as name:
            # Implements polynomial warmup. i.e., if global_step < warmup_steps, the
            # learning rate will be `global_step/num_warmup_steps * init_lr`.
            global_step_float = tf.cast(step, tf.float32)
            warmup_steps_float = tf.cast(self.warmup_steps, tf.float32)
            warmup_percent_done = global_step_float / warmup_steps_float
            warmup_learning_rate = self.initial_learning_rate * tf.math.pow(warmup_percent_done, self.power)
            return tf.cond(
                global_step_float < warmup_steps_float,
                lambda: warmup_learning_rate,
                lambda: self.decay_schedule_fn(step - self.warmup_steps),
                name=name,
            )

    def get_config(self):
        return {
            "initial_learning_rate": self.initial_learning_rate,
            "decay_schedule_fn": self.decay_schedule_fn,
            "warmup_steps": self.warmup_steps,
            "power": self.power,
            "name": self.name,
        }
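

# A minimal usage sketch (values are illustrative, not from the original module):
#
#     schedule = WarmUp(
#         initial_learning_rate=5e-5,
#         decay_schedule_fn=lambda step: 5e-5,  # constant rate after warmup
#         warmup_steps=100,
#     )
#     schedule(50)   # halfway through warmup -> 2.5e-5 (with the default power=1.0)
#     schedule(200)  # past warmup -> the wrapped decay schedule's value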
def create_optimizer(init_lr: float, num_train_steps: int, num_warmup_steps: int, min_lr_ratio: float = 0.0, adam_beta1: float = 0.9, adam_beta2: float = 0.999, adam_epsilon: float = 1e-8, adam_clipnorm: Optional[float] = None, adam_global_clipnorm: Optional[float] = None, weight_decay_rate: float = 0.0, power: float = 1.0, include_in_weight_decay: Optional[List[str]] = None):
    """Creates an optimizer with a learning rate schedule using a warmup phase followed by a polynomial decay."""
    lr_schedule = tf.keras.optimizers.schedules.PolynomialDecay(
        initial_learning_rate=init_lr, decay_steps=num_train_steps - num_warmup_steps, end_learning_rate=init_lr * min_lr_ratio, power=power)
    if num_warmup_steps:
        lr_schedule = WarmUp(
            initial_learning_rate=init_lr, decay_schedule_fn=lr_schedule, warmup_steps=num_warmup_steps)
    if weight_decay_rate > 0.0:
        optimizer = AdamWeightDecay(
            learning_rate=lr_schedule, weight_decay_rate=weight_decay_rate, beta_1=adam_beta1, beta_2=adam_beta2, epsilon=adam_epsilon, clipnorm=adam_clipnorm, global_clipnorm=adam_global_clipnorm, exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"], include_in_weight_decay=include_in_weight_decay)
    else:
        optimizer = tf.keras.optimizers.Adam(
            learning_rate=lr_schedule, beta_1=adam_beta1, beta_2=adam_beta2, epsilon=adam_epsilon, clipnorm=adam_clipnorm, global_clipnorm=adam_global_clipnorm)
    # We return the optimizer and the LR scheduler in order to better track the
    # evolution of the LR independently of the optimizer.
    return optimizer, lr_schedule
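

# A minimal usage sketch (values are illustrative, not from the original module):
#
#     optimizer, lr_schedule = create_optimizer(
#         init_lr=5e-5, num_train_steps=1_000, num_warmup_steps=100, weight_decay_rate=0.01
#     )
#     # `optimizer` can then be passed to `model.compile(...)` or used manually.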
class AdamWeightDecay(Adam):
    """Adam with decoupled weight decay that can be excluded for selected parameters (e.g. layer norm and biases)."""

    def __init__(
        self,
        learning_rate: Union[float, tf.keras.optimizers.schedules.LearningRateSchedule] = 0.001,
        beta_1: float = 0.9,
        beta_2: float = 0.999,
        epsilon: float = 1e-7,
        amsgrad: bool = False,
        weight_decay_rate: float = 0.0,
        include_in_weight_decay: Optional[List[str]] = None,
        exclude_from_weight_decay: Optional[List[str]] = None,
        name: str = "AdamWeightDecay",
        **kwargs,
    ):
        super().__init__(learning_rate, beta_1, beta_2, epsilon, amsgrad, name, **kwargs)
        self.weight_decay_rate = weight_decay_rate
        self._include_in_weight_decay = include_in_weight_decay
        self._exclude_from_weight_decay = exclude_from_weight_decay

    @classmethod
    def from_config(cls, config):
        """Creates an optimizer from its config with WarmUp custom object."""
        custom_objects = {"WarmUp": WarmUp}
        return super(AdamWeightDecay, cls).from_config(config, custom_objects=custom_objects)

    def _prepare_local(self, var_device, var_dtype, apply_state):
        super(AdamWeightDecay, self)._prepare_local(var_device, var_dtype, apply_state)
        apply_state[(var_device, var_dtype)]["weight_decay_rate"] = tf.constant(
            self.weight_decay_rate, name="adam_weight_decay_rate"
        )

    def _decay_weights_op(self, var, learning_rate, apply_state):
        do_decay = self._do_use_weight_decay(var.name)
        if do_decay:
            return var.assign_sub(
                learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]["weight_decay_rate"],
                use_locking=self._use_locking,
            )
        return tf.no_op()

    def apply_gradients(self, grads_and_vars, name=None, **kwargs):
        grads, tvars = list(zip(*grads_and_vars))
        return super(AdamWeightDecay, self).apply_gradients(zip(grads, tvars), name=name, **kwargs)

    def _get_lr(self, var_device, var_dtype, apply_state):
        """Retrieves the learning rate with the given state."""
        if apply_state is None:
            return self._decayed_lr_t[var_dtype], {}
        apply_state = apply_state or {}
        coefficients = apply_state.get((var_device, var_dtype))
        if coefficients is None:
            coefficients = self._fallback_apply_state(var_device, var_dtype)
            apply_state[(var_device, var_dtype)] = coefficients
        return coefficients["lr_t"], {"apply_state": apply_state}

    def _resource_apply_dense(self, grad, var, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_dense(grad, var, **kwargs)

    def _resource_apply_sparse(self, grad, var, indices, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_sparse(grad, var, indices, **kwargs)

    def get_config(self):
        config = super().get_config()
        config.update({"weight_decay_rate": self.weight_decay_rate})
        return config

    def _do_use_weight_decay(self, param_name):
        """Whether to use L2 weight decay for `param_name`."""
        if self.weight_decay_rate == 0:
            return False
        if self._include_in_weight_decay:
            for r in self._include_in_weight_decay:
                if re.search(r, param_name) is not None:
                    return True
        if self._exclude_from_weight_decay:
            for r in self._exclude_from_weight_decay:
                if re.search(r, param_name) is not None:
                    return False
        return True
class GradientAccumulator:
    """Accumulates gradients over several micro-batches so they can be applied in one optimizer step."""

    def __init__(self):
        self._gradients = []
        self._accum_steps = None

    @property
    def step(self):
        """Number of accumulated steps."""
        if self._accum_steps is None:
            self._accum_steps = tf.Variable(
                tf.constant(0, dtype=tf.int64),
                trainable=False,
                synchronization=tf.VariableSynchronization.ON_READ,
                aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
            )
        return self._accum_steps.value()

    @property
    def gradients(self):
        """The accumulated gradients on the current replica."""
        if not self._gradients:
            raise ValueError("The accumulator should be called first to initialize the gradients")
        return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]

    def __call__(self, gradients):
        """Accumulates `gradients` on the current replica."""
        if not self._gradients:
            _ = self.step  # Create the step variable.
            self._gradients.extend(
                [
                    tf.Variable(
                        tf.zeros_like(gradient),
                        trainable=False,
                        synchronization=tf.VariableSynchronization.ON_READ,
                        aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
                    )
                    if gradient is not None
                    else gradient
                    for gradient in gradients
                ]
            )
        if len(gradients) != len(self._gradients):
            raise ValueError(f"Expected {len(self._gradients)} gradients, but got {len(gradients)}")

        for accum_gradient, gradient in zip(self._gradients, gradients):
            if accum_gradient is not None and gradient is not None:
                accum_gradient.assign_add(gradient)

        self._accum_steps.assign_add(1)

    def reset(self):
        """Resets the accumulated gradients on the current replica."""
        if not self._gradients:
            return
        self._accum_steps.assign(0)
        for gradient in self._gradients:
            if gradient is not None:
                gradient.assign(tf.zeros_like(gradient))
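
# Usage sketch (added): a minimal gradient-accumulation loop driving the class
# above. `model`, `loss_fn`, and `dataset` are hypothetical stand-ins, and
# `import tensorflow as tf` is assumed as in the rest of this file.
def train_with_accumulation(model, loss_fn, dataset, accumulation_steps=4):
    accumulator = GradientAccumulator()
    optimizer = tf.keras.optimizers.Adam(learning_rate=1e-3)
    for step, (features, labels) in enumerate(dataset, start=1):
        with tf.GradientTape() as tape:
            loss = loss_fn(labels, model(features, training=True))
        accumulator(tape.gradient(loss, model.trainable_variables))
        if step % accumulation_steps == 0:
            # Gradients are summed across micro-batches; divide by
            # accumulation_steps here if a mean gradient is desired.
            optimizer.apply_gradients(zip(accumulator.gradients, model.trainable_variables))
            accumulator.reset()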
| 28 |
'''simple docstring'''
from . import (
albert,
align,
altclip,
audio_spectrogram_transformer,
auto,
autoformer,
bark,
bart,
barthez,
bartpho,
beit,
bert,
bert_generation,
bert_japanese,
bertweet,
big_bird,
bigbird_pegasus,
biogpt,
bit,
blenderbot,
blenderbot_small,
blip,
blip_a,
bloom,
bridgetower,
byta,
camembert,
canine,
chinese_clip,
clap,
clip,
clipseg,
codegen,
conditional_detr,
convbert,
convnext,
convnextva,
cpm,
cpmant,
ctrl,
cvt,
dataavec,
deberta,
deberta_va,
decision_transformer,
deformable_detr,
deit,
deprecated,
deta,
detr,
dialogpt,
dinat,
distilbert,
dit,
donut,
dpr,
dpt,
efficientformer,
efficientnet,
electra,
encodec,
encoder_decoder,
ernie,
ernie_m,
esm,
falcon,
flaubert,
flava,
fnet,
focalnet,
fsmt,
funnel,
git,
glpn,
gpta,
gpt_bigcode,
gpt_neo,
gpt_neox,
gpt_neox_japanese,
gpt_swa,
gptj,
gptsan_japanese,
graphormer,
groupvit,
herbert,
hubert,
ibert,
imagegpt,
informer,
instructblip,
jukebox,
layoutlm,
    layoutlmva,
layoutxlm,
led,
levit,
lilt,
llama,
longformer,
longta,
luke,
lxmert,
mam_aaa,
marian,
markuplm,
maskaformer,
maskformer,
mbart,
mbartaa,
mega,
megatron_bert,
megatron_gpta,
mgp_str,
mluke,
mobilebert,
    mobilenet_va,
mobilevit,
mobilevitva,
mpnet,
mra,
mta,
musicgen,
mvp,
nat,
nezha,
nllb,
nllb_moe,
nystromformer,
oneformer,
open_llama,
openai,
opt,
owlvit,
pegasus,
pegasus_x,
perceiver,
phobert,
pixastruct,
plbart,
poolformer,
prophetnet,
qdqbert,
rag,
realm,
reformer,
regnet,
rembert,
resnet,
roberta,
roberta_prelayernorm,
roc_bert,
roformer,
rwkv,
sam,
segformer,
sew,
sew_d,
speech_encoder_decoder,
speech_to_text,
speech_to_text_a,
speechta,
splinter,
squeezebert,
swiftformer,
swin,
swinasr,
swinva,
switch_transformers,
ta,
table_transformer,
tapas,
time_series_transformer,
timesformer,
timm_backbone,
transfo_xl,
trocr,
tvlt,
umta,
unispeech,
unispeech_sat,
upernet,
videomae,
vilt,
vision_encoder_decoder,
vision_text_dual_encoder,
visual_bert,
vit,
vit_hybrid,
vit_mae,
vit_msn,
vivit,
wavaveca,
wavaveca_conformer,
wavaveca_phoneme,
wavaveca_with_lm,
wavlm,
whisper,
x_clip,
xglm,
xlm,
xlm_prophetnet,
xlm_roberta,
xlm_roberta_xl,
xlnet,
xmod,
yolos,
yoso,
)
| 28 | 1 |
"""simple docstring"""
from math import factorial, pi
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase = 30 ) -> float:
if not isinstance(__lowerCamelCase , (int, float) ):
raise ValueError('''maclaurin_sin() requires either an int or float for theta''' )
if not isinstance(__lowerCamelCase , __lowerCamelCase ) or accuracy <= 0:
raise ValueError('''maclaurin_sin() requires a positive int for accuracy''' )
lowercase__ : Union[str, Any] = float(__lowerCamelCase )
lowercase__ : Any = theta // (2 * pi)
theta -= 2 * div * pi
return sum(
(-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1 ) for r in range(__lowerCamelCase ) )
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase = 30 ) -> float:
if not isinstance(__lowerCamelCase , (int, float) ):
raise ValueError('''maclaurin_cos() requires either an int or float for theta''' )
if not isinstance(__lowerCamelCase , __lowerCamelCase ) or accuracy <= 0:
raise ValueError('''maclaurin_cos() requires a positive int for accuracy''' )
lowercase__ : int = float(__lowerCamelCase )
lowercase__ : Tuple = theta // (2 * pi)
theta -= 2 * div * pi
return sum((-1) ** r * theta ** (2 * r) / factorial(2 * r ) for r in range(__lowerCamelCase ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(maclaurin_sin(10))
print(maclaurin_sin(-10))
print(maclaurin_sin(10, 15))
print(maclaurin_sin(-10, 15))
print(maclaurin_cos(5))
print(maclaurin_cos(-5))
print(maclaurin_cos(10, 15))
print(maclaurin_cos(-10, 15))
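
# Sanity check (added): a quick, hedged cross-check against the stdlib;
# assumes the two functions above are in scope.
from math import cos, sin

for x in (0.0, 1.0, -2.5, 10.0):
    assert abs(maclaurin_sin(x) - sin(x)) < 1e-9
    assert abs(maclaurin_cos(x) - cos(x)) < 1e-9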
| 370 |
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class __A ( A_ ):
'''simple docstring'''
lowerCAmelCase : List[Any] = ["image_processor", "tokenizer"]
lowerCAmelCase : int = "ChineseCLIPImageProcessor"
lowerCAmelCase : str = ("BertTokenizer", "BertTokenizerFast")
def __init__( self : Tuple ,_snake_case : str=None ,_snake_case : Union[str, Any]=None ,**_snake_case : str ) -> Any:
"""simple docstring"""
lowercase__ : Any = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' ,_snake_case ,)
lowercase__ : Tuple = kwargs.pop('''feature_extractor''' )
lowercase__ : Any = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(_snake_case ,_snake_case )
lowercase__ : List[Any] = self.image_processor
def __call__( self : List[Any] ,_snake_case : Optional[int]=None ,_snake_case : Dict=None ,_snake_case : List[Any]=None ,**_snake_case : List[str] ) -> List[Any]:
"""simple docstring"""
if text is None and images is None:
raise ValueError('''You have to specify either text or images. Both cannot be none.''' )
if text is not None:
lowercase__ : str = self.tokenizer(_snake_case ,return_tensors=_snake_case ,**_snake_case )
if images is not None:
lowercase__ : str = self.image_processor(_snake_case ,return_tensors=_snake_case ,**_snake_case )
if text is not None and images is not None:
lowercase__ : Any = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**_snake_case ) ,tensor_type=_snake_case )
def UpperCAmelCase ( self : Any ,*_snake_case : List[Any] ,**_snake_case : Optional[int] ) -> Tuple:
"""simple docstring"""
return self.tokenizer.batch_decode(*_snake_case ,**_snake_case )
def UpperCAmelCase ( self : Union[str, Any] ,*_snake_case : Tuple ,**_snake_case : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
return self.tokenizer.decode(*_snake_case ,**_snake_case )
@property
def UpperCAmelCase ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
lowercase__ : List[str] = self.tokenizer.model_input_names
lowercase__ : int = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def UpperCAmelCase ( self : Optional[int] ) -> Any:
"""simple docstring"""
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' ,_snake_case ,)
return self.image_processor_class
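
# Usage sketch (added): how a processor like the one above is typically
# driven. The checkpoint id and image path are assumptions for illustration.
def _usage_sketch():
    from PIL import Image

    processor = ChineseCLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
    inputs = processor(text=["一张猫的照片"], images=Image.open("cat.png"), return_tensors="pt")
    # `inputs` carries the tokenizer fields (input_ids, attention_mask) plus pixel_values.
    return inputs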
| 302 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class CycleDiffusionPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = CycleDiffusionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        "negative_prompt",
        "height",
        "width",
        "negative_prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"source_prompt"})
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", num_train_timesteps=1000,
            clip_sample=False, set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05,
            num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image / 2 + 0.5
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "An astronaut riding an elephant",
            "source_prompt": "An astronaut riding a horse",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "eta": 0.1,
            "strength": 0.8,
            "guidance_scale": 3,
            "source_guidance_scale": 1,
            "output_type": "numpy",
        }
        return inputs
    def test_cycle_diffusion(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = CycleDiffusionPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = pipe(**inputs)
        images = output.images

        image_slice = images[0, -3:, -3:, -1]

        assert images.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4459, 0.4943, 0.4544, 0.6643, 0.5474, 0.4327, 0.5701, 0.5959, 0.5179])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_cycle_diffusion_fp16(self):
        components = self.get_dummy_components()
        for name, module in components.items():
            if hasattr(module, "half"):
                components[name] = module.half()
        pipe = CycleDiffusionPipeline(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)
        images = output.images

        image_slice = images[0, -3:, -3:, -1]

        assert images.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.3506, 0.4543, 0.446, 0.4575, 0.5195, 0.4155, 0.5273, 0.518, 0.4116])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    @skip_mps
    def test_save_load_local(self):
        return super().test_save_load_local()

    @unittest.skip("non-deterministic pipeline")
    def test_inference_batch_single_identical(self):
        return super().test_inference_batch_single_identical()

    @skip_mps
    def test_dict_tuple_outputs_equivalent(self):
        return super().test_dict_tuple_outputs_equivalent()

    @skip_mps
    def test_save_load_optional_components(self):
        return super().test_save_load_optional_components()

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        return super().test_attention_slicing_forward_pass()
@slow
@require_torch_gpu
class CycleDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_cycle_diffusion_pipeline_fp16(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/cycle-diffusion/black_colored_car.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy"
        )
        init_image = init_image.resize((512, 512))

        model_id = "CompVis/stable-diffusion-v1-4"
        scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = CycleDiffusionPipeline.from_pretrained(
            model_id, scheduler=scheduler, safety_checker=None, torch_dtype=torch.float16, revision="fp16"
        )

        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        source_prompt = "A black colored car"
        prompt = "A blue colored car"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            source_prompt=source_prompt,
            image=init_image,
            num_inference_steps=100,
            eta=0.1,
            strength=0.85,
            guidance_scale=3,
            source_guidance_scale=1,
            generator=generator,
            output_type="np",
        )
        image = output.images

        # the values aren't exactly equal, but the images look the same visually
        assert np.abs(image - expected_image).max() < 5e-1
    def test_cycle_diffusion_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/cycle-diffusion/black_colored_car.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy"
        )
        init_image = init_image.resize((512, 512))

        model_id = "CompVis/stable-diffusion-v1-4"
        scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = CycleDiffusionPipeline.from_pretrained(model_id, scheduler=scheduler, safety_checker=None)

        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        source_prompt = "A black colored car"
        prompt = "A blue colored car"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            source_prompt=source_prompt,
            image=init_image,
            num_inference_steps=100,
            eta=0.1,
            strength=0.85,
            guidance_scale=3,
            source_guidance_scale=1,
            generator=generator,
            output_type="np",
        )
        image = output.images

        assert np.abs(image - expected_image).max() < 2e-2
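
# Usage sketch (added): the minimal CycleDiffusion call the tests above
# exercise. The checkpoint and local file name are assumptions, and a CUDA
# device is assumed; only the argument names come from the tests.
def run_cycle_diffusion_sketch():
    scheduler = DDIMScheduler.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="scheduler")
    pipe = CycleDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", scheduler=scheduler).to("cuda")
    image = load_image("black_car.png").resize((512, 512))
    return pipe(
        prompt="A blue colored car",
        source_prompt="A black colored car",
        image=image,
        num_inference_steps=100,
        strength=0.85,
        guidance_scale=3,
        source_guidance_scale=1,
        generator=torch.manual_seed(0),
    ).images[0]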
| 71 |
def pancake_sort(arr) -> list:
    """Sort `arr` in ascending order using pancake flips (prefix reversals)."""
    cur = len(arr)
    while cur > 1:
        # Find the index of the maximum number in arr[0:cur]
        mi = arr.index(max(arr[0:cur]))
        # Reverse from 0 to mi, moving the maximum to the front
        arr = arr[mi::-1] + arr[mi + 1 : len(arr)]
        # Reverse the first `cur` elements, moving the maximum to position cur - 1
        arr = arr[cur - 1 :: -1] + arr[cur : len(arr)]
        cur -= 1
    return arr


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(pancake_sort(unsorted))
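
# Quick check (added): pancake_sort should order any list of comparables.
assert pancake_sort([3, 1, 4, 1, 5, 9, 2, 6]) == [1, 1, 2, 3, 4, 5, 6, 9]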
| 71 | 1 |
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
logger = logging.get_logger(__name__)
@dataclass
class GlueDataTrainingArguments:
    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(glue_processors.keys())})
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )

    def __post_init__(self):
        self.task_name = self.task_name.lower()
class Split(Enum):
    train = "train"
    dev = "dev"
    test = "test"


class GlueDataset(Dataset):
    args: GlueDataTrainingArguments
    output_mode: str
    features: List[InputFeatures]

    def __init__(
        self,
        args: GlueDataTrainingArguments,
        tokenizer: PreTrainedTokenizerBase,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        cache_dir: Optional[str] = None,
    ):
        warnings.warn(
            "This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets "
            "library. You can have a look at this example script for pointers: "
            "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py",
            FutureWarning,
        )
        self.args = args
        self.processor = glue_processors[args.task_name]()
        self.output_mode = glue_output_modes[args.task_name]
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        # Load data features from cache or dataset file
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}",
        )
        label_list = self.processor.get_labels()
        if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
            "RobertaTokenizer",
            "RobertaTokenizerFast",
            "XLMRobertaTokenizer",
            "BartTokenizer",
            "BartTokenizerFast",
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
        self.label_list = label_list

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.features = torch.load(cached_features_file)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
                )
            else:
                logger.info(f"Creating features from dataset file at {args.data_dir}")

                if mode == Split.dev:
                    examples = self.processor.get_dev_examples(args.data_dir)
                elif mode == Split.test:
                    examples = self.processor.get_test_examples(args.data_dir)
                else:
                    examples = self.processor.get_train_examples(args.data_dir)
                if limit_length is not None:
                    examples = examples[:limit_length]
                self.features = glue_convert_examples_to_features(
                    examples,
                    tokenizer,
                    max_length=args.max_seq_length,
                    label_list=label_list,
                    output_mode=self.output_mode,
                )
                start = time.time()
                torch.save(self.features, cached_features_file)
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )
    def __len__(self):
        return len(self.features)

    def __getitem__(self, i) -> InputFeatures:
        return self.features[i]

    def get_labels(self):
        return self.label_list
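
# Usage sketch (added): building the dataset above. The checkpoint and data
# directory are assumptions; `mode` accepts a string or a `Split` member.
def _usage_sketch():
    from transformers import AutoTokenizer

    data_args = GlueDataTrainingArguments(task_name="mrpc", data_dir="./glue_data/MRPC")
    tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
    train_ds = GlueDataset(data_args, tokenizer, mode="train")
    return len(train_ds), train_ds.get_labels()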
| 364 |
from typing import TYPE_CHECKING

from ....utils import _LazyModule

_import_structure = {"tokenization_tapex": ["TapexTokenizer"]}

if TYPE_CHECKING:
    from .tokenization_tapex import TapexTokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
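
# Sketch (added): the idea behind the `_LazyModule` pattern above, simplified
# for illustration. This is not the real transformers implementation.
import importlib
import types


class _SketchLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._attr_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        module = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so later lookups skip __getattr__
        return value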
| 82 | 0 |
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_SP = get_tests_dir("fixtures/test_sentencepiece.model")

mock_tokenizer_config = {"target_lang": "fi", "source_lang": "en"}
zh_code = ">>zh<<"
ORG_NAME = "Helsinki-NLP/"

if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
@require_sentencepiece
class MarianTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MarianTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        vocab = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab"])
        save_json(mock_tokenizer_config, save_dir / VOCAB_FILES_NAMES["tokenizer_config_file"])
        if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["source_spm"])
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["target_spm"])

        tokenizer = MarianTokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)
    def get_tokenizer(self, **kwargs) -> MarianTokenizer:
        return MarianTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return (
            "This is a test",
            "This is a test",
        )

    def test_convert_token_and_id(self):
        token = "</s>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "</s>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "<pad>")
        self.assertEqual(len(vocab_keys), 9)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 9)
    def test_tokenizer_equivalence_en_de(self):
        en_de_tokenizer = MarianTokenizer.from_pretrained(f"{ORG_NAME}opus-mt-en-de")
        batch = en_de_tokenizer(["I am a small frog"], return_tensors=None)
        self.assertIsInstance(batch, BatchEncoding)
        expected = [38, 121, 14, 697, 38848, 0]
        self.assertListEqual(expected, batch.input_ids[0])

        save_dir = tempfile.mkdtemp()
        en_de_tokenizer.save_pretrained(save_dir)
        contents = [x.name for x in Path(save_dir).glob("*")]
        self.assertIn("source.spm", contents)
        MarianTokenizer.from_pretrained(save_dir)

    def test_outputs_not_longer_than_maxlen(self):
        tok = self.get_tokenizer()

        batch = tok(
            ["I am a small frog" * 1000, "I am a small frog"], padding=True, truncation=True, return_tensors=FRAMEWORK
        )
        self.assertIsInstance(batch, BatchEncoding)
        self.assertEqual(batch.input_ids.shape, (2, 512))

    def test_outputs_can_be_shorter(self):
        tok = self.get_tokenizer()
        batch_smaller = tok(["I am a tiny frog", "I am a small frog"], padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch_smaller, BatchEncoding)
        self.assertEqual(batch_smaller.input_ids.shape, (2, 10))
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
lowerCamelCase : Dict = {"input_ids": [[4_3495, 462, 20, 4_2164, 1369, 52, 464, 132, 1703, 492, 13, 7491, 3_8999, 6, 8, 464, 132, 1703, 492, 13, 4669, 3_7867, 13, 7525, 27, 1593, 988, 13, 3_3972, 7029, 6, 20, 8251, 383, 2, 270, 5866, 3788, 2, 2353, 8251, 1_2338, 2, 1_3958, 387, 2, 3629, 6953, 188, 2900, 2, 1_3958, 8011, 1_1501, 23, 8460, 4073, 3_4009, 20, 435, 1_1439, 27, 8, 8460, 4073, 6004, 20, 9988, 375, 27, 33, 266, 1945, 1076, 1350, 3_7867, 3288, 5, 577, 1076, 4374, 8, 5082, 5, 2_6453, 257, 556, 403, 2, 242, 132, 383, 316, 492, 8, 1_0767, 6, 316, 304, 4239, 3, 0], [148, 1_5722, 19, 1839, 12, 1350, 13, 2_2327, 5082, 5418, 4_7567, 3_5938, 59, 318, 1_9552, 108, 2183, 54, 1_4976, 4835, 32, 547, 1114, 8, 315, 2417, 5, 92, 1_9088, 3, 0, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100], [36, 6395, 1_2570, 3_9147, 1_1597, 6, 266, 4, 4_5405, 7296, 3, 0, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=lowerCamelCase,  # the long expected-encoding literal defined above
            model_name="Helsinki-NLP/opus-mt-en-de",
            revision="1a8c2263da11e68e50938f97e10cd57820bd504c",
            decode_kwargs={"use_source_tokenizer": True},
        )
    def test_separate_vocabs(self):
        tokenizer = MarianTokenizer.from_pretrained("hf-internal-testing/test-marian-two-vocabs")

        source_text = "Tämä on testi"
        target_text = "This is a test"

        expected_src_ids = [76, 7, 2047, 2]
        expected_target_ids = [69, 12, 11, 940, 2]

        src_ids = tokenizer(source_text).input_ids
        self.assertListEqual(src_ids, expected_src_ids)

        target_ids = tokenizer(text_target=target_text).input_ids
        self.assertListEqual(target_ids, expected_target_ids)

        decoded = tokenizer.decode(target_ids, skip_special_tokens=True)
        self.assertEqual(decoded, target_text)
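
# Usage sketch (added): the round trip these tests cover, against the public
# checkpoint named above; network access is assumed.
def _usage_sketch():
    tok = MarianTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-de")
    batch = tok(["I am a small frog"], return_tensors="pt")
    return batch.input_ids  # per the test above: [[38, 121, 14, 697, 38848, 0]]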
| 48 |
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTMSNModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope

        # in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ViTMSNConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTMSNModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = ViTMSNForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        print(f"Pixel and labels shape: {pixel_values.shape}, {labels.shape}")
        print(f"Labels: {labels}")
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = ViTMSNForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ViTMSNModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Model tests for ViTMSN; ViTMSN works on pixel values rather than input_ids."""

    all_model_classes = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": ViTMSNModel, "image-classification": ViTMSNForImageClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTMSNModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMSNConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViTMSN does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTMSNModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class ViTMSNModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("facebook/vit-msn-small") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        torch.manual_seed(2)
        model = ViTMSNForImageClassification.from_pretrained("facebook/vit-msn-small").to(torch_device)
        image_processor = self.default_image_processor

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.0803, -0.4454, -0.2375]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
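
# Usage sketch (added): the bare inference path the integration test above
# checks. The checkpoint comes from the test; the image path is an assumption.
def _inference_sketch():
    from PIL import Image

    processor = ViTImageProcessor.from_pretrained("facebook/vit-msn-small")
    model = ViTMSNForImageClassification.from_pretrained("facebook/vit-msn-small")
    inputs = processor(images=Image.open("cat.png"), return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits
    return logits.argmax(-1)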
| 300 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
__lowerCAmelCase = {"""configuration_unispeech""": ["""UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP""", """UniSpeechConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_unispeech"] = [
        "UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST",
        "UniSpeechForCTC",
        "UniSpeechForPreTraining",
        "UniSpeechForSequenceClassification",
        "UniSpeechModel",
        "UniSpeechPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 369 |
"""Tests for the BlenderbotSmall tokenizer."""
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class BlenderbotSmallTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BlenderbotSmallTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        vocab = ["__start__", "adapt", "act", "ap@@", "te", "__end__", "__unk__"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        merges = ["#version: 0.2", "a p", "t e</w>", "ap t</w>", "a d", "ad apt</w>", "a c", "ac t</w>", ""]
        self.special_tokens_map = {"unk_token": "__unk__", "bos_token": "__start__", "eos_token": "__end__"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt act apte"
        output_text = "adapt act apte"
        return input_text, output_text

    def test_full_blenderbot_small_tokenizer(self):
        tokenizer = BlenderbotSmallTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt act apte"
        bpe_tokens = ["adapt", "act", "ap@@", "te"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]
        input_bpe_tokens = [0, 1, 2, 3, 4, 5]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_special_tokens_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        assert tok("sam").input_ids == [1384]
        src_text = "I am a small frog."
        encoded = tok([src_text], padding=False, truncation=True)["input_ids"]
        decoded = tok.batch_decode(encoded, skip_special_tokens=True, clean_up_tokenization_spaces=True)[0]
        assert src_text != decoded  # I wish it did!
        assert decoded == "i am a small frog ."

    def test_empty_word_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        src_text = "I am a small frog ."
        src_text_dot = "."
        encoded = tok(src_text)["input_ids"]
        encoded_dot = tok(src_text_dot)["input_ids"]
        assert encoded[-1] == encoded_dot[0]
| 5 | 0 |
'''simple docstring'''
import json
import logging
import os
import re
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import datasets
import numpy as np
import torch
import torchaudio
from packaging import version
from torch import nn
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForCTC,
    Wav2Vec2Processor,
is_apex_available,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse('1.6'):
    _is_native_amp_available = True
from torch.cuda.amp import autocast
logger = logging.getLogger(__name__)
def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    freeze_feature_extractor: Optional[bool] = field(
        default=True, metadata={"help": "Whether to freeze the feature extractor layers of the model."}
    )
    attention_dropout: Optional[float] = field(
        default=0.1, metadata={"help": "The dropout ratio for the attention probabilities."}
    )
    activation_dropout: Optional[float] = field(
        default=0.1, metadata={"help": "The dropout ratio for activations inside the fully connected layer."}
    )
    hidden_dropout: Optional[float] = field(
        default=0.1,
        metadata={
            "help": "The dropout probability for all fully connected layers in the embeddings, encoder, and pooler."
        },
    )
    feat_proj_dropout: Optional[float] = field(
        default=0.1,
        metadata={"help": "The dropout probability for all 1D convolutional layers in feature extractor."},
    )
    mask_time_prob: Optional[float] = field(
        default=0.05,
        metadata={
            "help": (
                "Probability of each feature vector along the time axis to be chosen as the start of the vector"
                "span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature"
                "vectors will be masked along the time axis. This is only relevant if ``apply_spec_augment is True``."
            )
        },
    )
    layerdrop: Optional[float] = field(default=0.0, metadata={"help": "The LayerDrop probability."})
@dataclass
class DataTrainingArguments:
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_split_name: Optional[str] = field(
        default="train+validation",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_val_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of validation examples to this "
                "value if set."
            )
        },
    )
    chars_to_ignore: List[str] = list_field(
        default=[",", "?", ".", "!", "-", ";", ":", '""', "%", "'", '"', "�"],
        metadata={"help": "A list of characters to remove from the transcripts."},
    )
@dataclass
class DataCollatorCTCWithPadding:
    """Dynamically pads the received inputs and their labels for CTC training."""

    processor: Wav2Vec2Processor
    padding: Union[bool, str] = True
    max_length: Optional[int] = None
    max_length_labels: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    pad_to_multiple_of_labels: Optional[int] = None

    def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:
        # split inputs and labels since they have different lengths and need different padding methods
        input_features = [{"input_values": feature["input_values"]} for feature in features]
        label_features = [{"input_ids": feature["labels"]} for feature in features]

        batch = self.processor.pad(
            input_features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )
        labels_batch = self.processor.pad(
            labels=label_features,
            padding=self.padding,
            max_length=self.max_length_labels,
            pad_to_multiple_of=self.pad_to_multiple_of_labels,
            return_tensors="pt",
        )

        # replace padding with -100 to ignore loss correctly
        labels = labels_batch["input_ids"].masked_fill(labels_batch.attention_mask.ne(1), -100)

        batch["labels"] = labels

        return batch
class CTCTrainer(Trainer):
    def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor:
        model.train()
        inputs = self._prepare_inputs(inputs)

        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model, inputs)
        else:
            loss = self.compute_loss(model, inputs)

        if self.args.n_gpu > 1:
            if model.module.config.ctc_loss_reduction == "mean":
                loss = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                loss = loss.sum() / (inputs["labels"] >= 0).sum()
            else:
                raise ValueError(f"{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']")

        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps

        if self.use_amp:
            self.scaler.scale(loss).backward()
        elif self.use_apex:
            with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(loss)
        else:
            loss.backward()

        return loss.detach()
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Get the datasets:
    train_dataset = datasets.load_dataset(
        "common_voice", data_args.dataset_config_name, split=data_args.train_split_name
    )
    eval_dataset = datasets.load_dataset("common_voice", data_args.dataset_config_name, split="test")

    # Create and save tokenizer
    chars_to_ignore_regex = f'[{"".join(data_args.chars_to_ignore)}]'

    def remove_special_characters(batch):
        batch["text"] = re.sub(chars_to_ignore_regex, "", batch["sentence"]).lower() + " "
        return batch

    train_dataset = train_dataset.map(remove_special_characters, remove_columns=["sentence"])
    eval_dataset = eval_dataset.map(remove_special_characters, remove_columns=["sentence"])

    def extract_all_chars(batch):
        all_text = " ".join(batch["text"])
        vocab = list(set(all_text))
        return {"vocab": [vocab], "all_text": [all_text]}

    vocab_train = train_dataset.map(
        extract_all_chars,
        batched=True,
        batch_size=-1,
        keep_in_memory=True,
        remove_columns=train_dataset.column_names,
    )
    vocab_test = eval_dataset.map(
        extract_all_chars,
        batched=True,
        batch_size=-1,
        keep_in_memory=True,
        remove_columns=eval_dataset.column_names,
    )

    vocab_list = list(set(vocab_train["vocab"][0]) | set(vocab_test["vocab"][0]))
    vocab_dict = {v: k for k, v in enumerate(vocab_list)}
    vocab_dict["|"] = vocab_dict[" "]
    del vocab_dict[" "]
    vocab_dict["[UNK]"] = len(vocab_dict)
    vocab_dict["[PAD]"] = len(vocab_dict)

    with open("vocab.json", "w") as vocab_file:
        json.dump(vocab_dict, vocab_file)
    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    tokenizer = Wav2Vec2CTCTokenizer(
        "vocab.json",
        unk_token="[UNK]",
        pad_token="[PAD]",
        word_delimiter_token="|",
    )
    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1, sampling_rate=16_000, padding_value=0.0, do_normalize=True, return_attention_mask=True
    )
    processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
    model = Wav2Vec2ForCTC.from_pretrained(
        model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        activation_dropout=model_args.activation_dropout,
        attention_dropout=model_args.attention_dropout,
        hidden_dropout=model_args.hidden_dropout,
        feat_proj_dropout=model_args.feat_proj_dropout,
        mask_time_prob=model_args.mask_time_prob,
        gradient_checkpointing=training_args.gradient_checkpointing,
        layerdrop=model_args.layerdrop,
        ctc_loss_reduction="mean",
        pad_token_id=processor.tokenizer.pad_token_id,
        vocab_size=len(processor.tokenizer),
    )

    if data_args.max_train_samples is not None:
        max_train_samples = min(len(train_dataset), data_args.max_train_samples)
        train_dataset = train_dataset.select(range(max_train_samples))

    if data_args.max_val_samples is not None:
        eval_dataset = eval_dataset.select(range(data_args.max_val_samples))

    resampler = torchaudio.transforms.Resample(48_000, 16_000)

    # Preprocessing the datasets.
    # We need to read the audio files as arrays and tokenize the targets.
    def speech_file_to_array_fn(batch):
        speech_array, sampling_rate = torchaudio.load(batch["path"])
        batch["speech"] = resampler(speech_array).squeeze().numpy()
        batch["sampling_rate"] = 16_000
        batch["target_text"] = batch["text"]
        return batch

    train_dataset = train_dataset.map(
        speech_file_to_array_fn,
        remove_columns=train_dataset.column_names,
        num_proc=data_args.preprocessing_num_workers,
    )
    eval_dataset = eval_dataset.map(
        speech_file_to_array_fn,
        remove_columns=eval_dataset.column_names,
        num_proc=data_args.preprocessing_num_workers,
    )

    def prepare_dataset(batch):
        # check that all files have the correct sampling rate
        assert (
            len(set(batch["sampling_rate"])) == 1
        ), f"Make sure all inputs have the same sampling rate of {processor.feature_extractor.sampling_rate}."
        processed = processor(
            audio=batch["speech"], text=batch["target_text"], sampling_rate=batch["sampling_rate"][0]
        )
        batch.update(processed)
        return batch

    train_dataset = train_dataset.map(
        prepare_dataset,
        remove_columns=train_dataset.column_names,
        batch_size=training_args.per_device_train_batch_size,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
    )
    eval_dataset = eval_dataset.map(
        prepare_dataset,
        remove_columns=eval_dataset.column_names,
        batch_size=training_args.per_device_train_batch_size,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
    )
    # Metric
    wer_metric = datasets.load_metric("wer")

    def compute_metrics(pred):
        pred_logits = pred.predictions
        pred_ids = np.argmax(pred_logits, axis=-1)

        pred.label_ids[pred.label_ids == -100] = processor.tokenizer.pad_token_id

        pred_str = processor.batch_decode(pred_ids)
        # we do not want to group tokens when computing the metrics
        label_str = processor.batch_decode(pred.label_ids, group_tokens=False)

        wer = wer_metric.compute(predictions=pred_str, references=label_str)

        return {"wer": wer}

    if model_args.freeze_feature_extractor:
        model.freeze_feature_extractor()

    # Data collator
    data_collator = DataCollatorCTCWithPadding(processor=processor, padding=True)

    # Initialize our Trainer
    trainer = CTCTrainer(
        model=model,
        data_collator=data_collator,
        args=training_args,
        compute_metrics=compute_metrics,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        tokenizer=processor.feature_extractor,
    )

    # Training
    if training_args.do_train:
        if last_checkpoint is not None:
            checkpoint = last_checkpoint
        elif os.path.isdir(model_args.model_name_or_path):
            checkpoint = model_args.model_name_or_path
        else:
            checkpoint = None

        # Save the feature_extractor and the tokenizer
        if is_main_process(training_args.local_rank):
            processor.save_pretrained(training_args.output_dir)

        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()

        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate()
        max_val_samples = data_args.max_val_samples if data_args.max_val_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_val_samples, len(eval_dataset))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    return results
if __name__ == "__main__":
main()
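
# Illustration (added): what the -100 label masking in DataCollatorCTCWithPadding
# produces. The ids and shapes are made up for the sketch.
def _demo_label_masking():
    ids = torch.tensor([[12, 7, 0], [5, 0, 0]])  # 0 plays the pad id here
    mask = torch.tensor([[1, 1, 0], [1, 0, 0]])
    labels = ids.masked_fill(mask.ne(1), -100)
    # -> [[12, 7, -100], [5, -100, -100]]; -100 positions are ignored by the CTC loss
    return labels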
| 83 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_nezha": ["NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP", "NezhaConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_nezha"] = [
"NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST",
"NezhaForNextSentencePrediction",
"NezhaForMaskedLM",
"NezhaForPreTraining",
"NezhaForMultipleChoice",
"NezhaForQuestionAnswering",
"NezhaForSequenceClassification",
"NezhaForTokenClassification",
"NezhaModel",
"NezhaPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nezha import (
NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
NezhaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
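def _lazy_import_demo():
    # Hedged sketch: _LazyModule defers the heavy torch-backed import until an
    # attribute is first accessed, so constructing a config below is what
    # actually triggers loading modeling_nezha (torch must be installed).
    from transformers.models import nezha  # resolves through the lazy module
    return nezha.NezhaConfig()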
| 204 | 0 |
from ...processing_utils import ProcessorMixin
class SpeechT5Processor(ProcessorMixin):
    feature_extractor_class = "SpeechT5FeatureExtractor"
    tokenizer_class = "SpeechT5Tokenizer"
    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
    def __call__(self, *args, **kwargs):
        audio = kwargs.pop("audio", None)
        text = kwargs.pop("text", None)
        text_target = kwargs.pop("text_target", None)
        audio_target = kwargs.pop("audio_target", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        if audio is not None and text is not None:
            raise ValueError(
                "Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?" )
        if audio_target is not None and text_target is not None:
            raise ValueError(
                "Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?" )
        if audio is None and audio_target is None and text is None and text_target is None:
            raise ValueError(
                "You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process." )
        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        elif text is not None:
            inputs = self.tokenizer(text, **kwargs)
        else:
            inputs = None
        if audio_target is not None:
            targets = self.feature_extractor(audio_target=audio_target, *args, sampling_rate=sampling_rate, **kwargs)
            labels = targets["input_values"]
        elif text_target is not None:
            targets = self.tokenizer(text_target, **kwargs)
            labels = targets["input_ids"]
        else:
            targets = None
        if inputs is None:
            return targets
        if targets is not None:
            inputs["labels"] = labels
            decoder_attention_mask = targets.get("attention_mask")
            if decoder_attention_mask is not None:
                inputs["decoder_attention_mask"] = decoder_attention_mask
        return inputs
    def pad(self, *args, **kwargs):
        input_values = kwargs.pop("input_values", None)
        input_ids = kwargs.pop("input_ids", None)
        labels = kwargs.pop("labels", None)
        if input_values is not None and input_ids is not None:
            raise ValueError("Cannot process both `input_values` and `input_ids` inputs.")
        if input_values is None and input_ids is None and labels is None:
            raise ValueError(
                "You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded." )
        if input_values is not None:
            inputs = self.feature_extractor.pad(input_values, *args, **kwargs)
        elif input_ids is not None:
            inputs = self.tokenizer.pad(input_ids, **kwargs)
        else:
            inputs = None
        if labels is not None:
            if "input_ids" in labels or (isinstance(labels, list) and "input_ids" in labels[0]):
                targets = self.tokenizer.pad(labels, **kwargs)
                labels = targets["input_ids"]
            else:
                feature_size_hack = self.feature_extractor.feature_size
                self.feature_extractor.feature_size = self.feature_extractor.num_mel_bins
                targets = self.feature_extractor.pad(labels, *args, **kwargs)
                self.feature_extractor.feature_size = feature_size_hack
                labels = targets["input_values"]
        else:
            targets = None
        if inputs is None:
            return targets
        if targets is not None:
            inputs["labels"] = labels
            decoder_attention_mask = targets.get("attention_mask")
            if decoder_attention_mask is not None:
                inputs["decoder_attention_mask"] = decoder_attention_mask
        return inputs
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)
    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
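def _processor_demo():
    # Hedged sketch (assumes network access to the real microsoft/speecht5_tts
    # checkpoint): tokenizes a text input through a processor of the kind
    # defined above.
    from transformers import SpeechT5Processor
    processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
    return processor(text="hello world", return_tensors="pt")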
| 198 |
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def get_daily_ci_runs(token, num_runs=7):
    """simple docstring"""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}
    # The id of a workflow (not of a workflow run)
    workflow_id = "636036"
    url = f"https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"
    # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
    url += f"?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"
    result = requests.get(url, headers=headers).json()
    return result["workflow_runs"]
def get_last_daily_ci_runs(token):
    """simple docstring"""
    workflow_runs = get_daily_ci_runs(token)
    workflow_run_id = None
    for workflow_run in workflow_runs:
        if workflow_run["status"] == "completed":
            workflow_run_id = workflow_run["id"]
            break
    return workflow_run_id
def get_last_daily_ci_artifacts(artifact_names, output_dir, token):
    """simple docstring"""
    workflow_run_id = get_last_daily_ci_runs(token)
    if workflow_run_id is not None:
        artifacts_links = get_artifacts_links(worflow_run_id=workflow_run_id, token=token)
        for artifact_name in artifact_names:
            if artifact_name in artifacts_links:
                artifact_url = artifacts_links[artifact_name]
                download_artifact(
                    artifact_name=artifact_name, artifact_url=artifact_url, output_dir=output_dir, token=token )
def get_last_daily_ci_reports(artifact_names, output_dir, token):
    """simple docstring"""
    get_last_daily_ci_artifacts(artifact_names, output_dir, token)
    results = {}
    for artifact_name in artifact_names:
        artifact_zip_path = os.path.join(output_dir, f"{artifact_name}.zip")
        if os.path.isfile(artifact_zip_path):
            results[artifact_name] = {}
            with zipfile.ZipFile(artifact_zip_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        with z.open(filename) as f:
                            results[artifact_name][filename] = f.read().decode("UTF-8")
    return results
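if __name__ == "__main__":
    # Hedged usage sketch: the artifact name, output directory, and token
    # below are placeholders, not values from the original script.
    reports = get_last_daily_ci_reports(
        artifact_names=["run_all_tests_gpu_test_reports"], output_dir=".", token=None )
    print(sorted(reports))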
| 198 | 1 |
"""simple docstring"""
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTImageProcessor,
MobileViTVaConfig,
MobileViTVaForImageClassification,
MobileViTVaForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def load_orig_config_file(config_path):
    """simple docstring"""
    print("Loading config file...")
    def flatten_yaml_as_dict(d, parent_key="", sep="."):
        items = []
        for k, v in d.items():
            new_key = parent_key + sep + k if parent_key else k
            if isinstance(v, collections.abc.MutableMapping):
                items.extend(flatten_yaml_as_dict(v, new_key, sep=sep).items())
            else:
                items.append((new_key, v))
        return dict(items)
    config = argparse.Namespace()
    with open(config_path, "r") as yaml_file:
        try:
            cfg = yaml.load(yaml_file, Loader=yaml.FullLoader)
            flat_cfg = flatten_yaml_as_dict(cfg)
            for k, v in flat_cfg.items():
                setattr(config, k, v)
        except yaml.YAMLError as exc:
            logger.error("Error while loading config file: {}. Error message: {}".format(config_path, str(exc)))
    return config
def get_mobilevitva_config(task_name, orig_cfg_file):
    """simple docstring"""
    config = MobileViTVaConfig()
    is_segmentation_model = False
    # dataset
    if task_name.startswith("imagenet1k_"):
        config.num_labels = 1000
        if int(task_name.strip().split("_")[-1]) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = "imagenet-1k-id2label.json"
    elif task_name.startswith("imagenet21k_to_1k_"):
        config.num_labels = 21000
        if int(task_name.strip().split("_")[-1]) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = "imagenet-22k-id2label.json"
    elif task_name.startswith("ade20k_"):
        config.num_labels = 151
        config.image_size = 512
        filename = "ade20k-id2label.json"
        is_segmentation_model = True
    elif task_name.startswith("voc_"):
        config.num_labels = 21
        config.image_size = 512
        filename = "pascal-voc-id2label.json"
        is_segmentation_model = True
    # orig_config
    orig_config = load_orig_config_file(orig_cfg_file)
    assert getattr(orig_config, "model.classification.name", -1) == "mobilevit_v2", "Invalid model"
    config.width_multiplier = getattr(orig_config, "model.classification.mitv2.width_multiplier", 1.0)
    assert (
        getattr(orig_config, "model.classification.mitv2.attn_norm_layer", -1) == "layer_norm_2d"
    ), "Norm layers other than layer_norm_2d is not supported"
    config.hidden_act = getattr(orig_config, "model.classification.activation.name", "swish")
    # config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)
    if is_segmentation_model:
        config.output_stride = getattr(orig_config, "model.segmentation.output_stride", 16)
        if "_deeplabv3" in task_name:
            config.atrous_rates = getattr(orig_config, "model.segmentation.deeplabv3.aspp_rates", [12, 24, 36])
            config.aspp_out_channels = getattr(orig_config, "model.segmentation.deeplabv3.aspp_out_channels", 512)
            config.aspp_dropout_prob = getattr(orig_config, "model.segmentation.deeplabv3.aspp_dropout", 0.1)
    # id2label
    repo_id = "huggingface/label-files"
    idalabel = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    idalabel = {int(k): v for k, v in idalabel.items()}
    config.id2label = idalabel
    config.label2id = {v: k for k, v in idalabel.items()}
    return config
def rename_key(dct, old, new):
    """simple docstring"""
    val = dct.pop(old)
    dct[new] = val
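def _rename_key_demo():
    # Hedged sketch: rename_key above pops the old entry and re-inserts its
    # value under the new name, mutating the dict in place.
    toy_state_dict = {"old_name": 1}
    rename_key(toy_state_dict, "old_name", "new_name")
    return toy_state_dict  # {"new_name": 1}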
def create_rename_keys(state_dict, base_model=False):
    """simple docstring"""
    if base_model:
        model_prefix = ""
    else:
        model_prefix = "mobilevitv2."
    rename_keys = []
    for k in state_dict.keys():
        if k[:8] == "encoder.":
            k_new = k[8:]
        else:
            k_new = k
        if ".block." in k:
            k_new = k_new.replace(".block.", ".")
        if ".conv." in k:
            k_new = k_new.replace(".conv.", ".convolution.")
        if ".norm." in k:
            k_new = k_new.replace(".norm.", ".normalization.")
        if "conv_1." in k:
            k_new = k_new.replace("conv_1.", f"{model_prefix}conv_stem.")
        for i in [1, 2]:
            if f"layer_{i}." in k:
                k_new = k_new.replace(f"layer_{i}.", f"{model_prefix}encoder.layer.{i-1}.layer.")
        if ".exp_1x1." in k:
            k_new = k_new.replace(".exp_1x1.", ".expand_1x1.")
        if ".red_1x1." in k:
            k_new = k_new.replace(".red_1x1.", ".reduce_1x1.")
        for i in [3, 4, 5]:
            if f"layer_{i}.0." in k:
                k_new = k_new.replace(f"layer_{i}.0.", f"{model_prefix}encoder.layer.{i-1}.downsampling_layer.")
            if f"layer_{i}.1.local_rep.0." in k:
                k_new = k_new.replace(f"layer_{i}.1.local_rep.0.", f"{model_prefix}encoder.layer.{i-1}.conv_kxk.")
            if f"layer_{i}.1.local_rep.1." in k:
                k_new = k_new.replace(f"layer_{i}.1.local_rep.1.", f"{model_prefix}encoder.layer.{i-1}.conv_1x1.")
        for i in [3, 4, 5]:
            if i == 3:
                j_in = [0, 1]
            elif i == 4:
                j_in = [0, 1, 2, 3]
            elif i == 5:
                j_in = [0, 1, 2]
            for j in j_in:
                if f"layer_{i}.1.global_rep.{j}." in k:
                    k_new = k_new.replace(
                        f"layer_{i}.1.global_rep.{j}.", f"{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}.")
                if f"layer_{i}.1.global_rep.{j+1}." in k:
                    k_new = k_new.replace(
                        f"layer_{i}.1.global_rep.{j+1}.", f"{model_prefix}encoder.layer.{i-1}.layernorm.")
            if f"layer_{i}.1.conv_proj." in k:
                k_new = k_new.replace(f"layer_{i}.1.conv_proj.", f"{model_prefix}encoder.layer.{i-1}.conv_projection.")
        if "pre_norm_attn.0." in k:
            k_new = k_new.replace("pre_norm_attn.0.", "layernorm_before.")
        if "pre_norm_attn.1." in k:
            k_new = k_new.replace("pre_norm_attn.1.", "attention.")
        if "pre_norm_ffn.0." in k:
            k_new = k_new.replace("pre_norm_ffn.0.", "layernorm_after.")
        if "pre_norm_ffn.1." in k:
            k_new = k_new.replace("pre_norm_ffn.1.", "ffn.conv1.")
        if "pre_norm_ffn.3." in k:
            k_new = k_new.replace("pre_norm_ffn.3.", "ffn.conv2.")
        if "classifier.1." in k:
            k_new = k_new.replace("classifier.1.", "classifier.")
        if "seg_head." in k:
            k_new = k_new.replace("seg_head.", "segmentation_head.")
        if ".aspp_layer." in k:
            k_new = k_new.replace(".aspp_layer.", ".")
        if ".aspp_pool." in k:
            k_new = k_new.replace(".aspp_pool.", ".")
        rename_keys.append((k, k_new))
    return rename_keys
def remove_unused_keys(state_dict):
    """simple docstring"""
    keys_to_ignore = []
    for k in state_dict.keys():
        if k.startswith("seg_head.aux_head."):
            keys_to_ignore.append(k)
    for k in keys_to_ignore:
        state_dict.pop(k, None)
def prepare_img():
    """simple docstring"""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_mobilevitva_checkpoint(task_name, checkpoint_path, orig_config_path, pytorch_dump_folder_path):
    """simple docstring"""
    config = get_mobilevitva_config(task_name, orig_config_path)
    # load original state_dict
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    # load huggingface model
    if task_name.startswith("ade20k_") or task_name.startswith("voc_"):
        model = MobileViTVaForSemanticSegmentation(config).eval()
        base_model = False
    else:
        model = MobileViTVaForImageClassification(config).eval()
        base_model = False
    # remove and rename some keys of load the original model
    state_dict = checkpoint
    remove_unused_keys(state_dict)
    rename_keys = create_rename_keys(state_dict, base_model=base_model)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)
    # load modified state_dict
    model.load_state_dict(state_dict)
    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    # verify classification model
    if task_name.startswith("imagenet"):
        logits = outputs.logits
        predicted_class_idx = logits.argmax(-1).item()
        print("Predicted class:", model.config.id2label[predicted_class_idx])
        if task_name.startswith("imagenet1k_256") and config.width_multiplier == 1.0:
            # expected_logits for base variant
            expected_logits = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01])
            assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {task_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--task',
default='imagenet1k_256',
type=str,
help=(
'Name of the task for which the MobileViTV2 model you\'d like to convert is trained on . '
'\n Classification (ImageNet-1k)\n - MobileViTV2 (256x256) : imagenet1k_256\n - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384\n - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :\n imagenet21k_to_1k_256\n - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on\n ImageNet-1k 384x384) : imagenet21k_to_1k_384\n Segmentation\n - ADE20K Dataset : ade20k_deeplabv3\n - Pascal VOC 2012 Dataset: voc_deeplabv3\n '
),
choices=[
'imagenet1k_256',
'imagenet1k_384',
'imagenet21k_to_1k_256',
'imagenet21k_to_1k_384',
'ade20k_deeplabv3',
'voc_deeplabv3',
],
)
parser.add_argument(
'--orig_checkpoint_path', required=True, type=str, help='Path to the original state dict (.pt file).'
)
parser.add_argument('--orig_config_path', required=True, type=str, help='Path to the original config file.')
parser.add_argument(
'--pytorch_dump_folder_path', required=True, type=str, help='Path to the output PyTorch model directory.'
)
    args = parser.parse_args()
convert_mobilevitva_checkpoint(
args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
)
| 266 |
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)
class Speech2TextFeatureExtractor(SequenceFeatureExtractor):
    '''simple docstring'''
    model_input_names = ["input_features", "attention_mask"]
    def __init__(self, feature_size=80, sampling_rate=16_000, num_mel_bins=80, padding_value=0.0, do_ceptral_normalize=True, normalize_means=True, normalize_vars=True, **kwargs, ):
        '''simple docstring'''
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.num_mel_bins = num_mel_bins
        self.do_ceptral_normalize = do_ceptral_normalize
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.return_attention_mask = True
    def _extract_fbank_features(self, waveform, ):
        '''simple docstring'''
        waveform = waveform * (2**15)  # Kaldi compliance: 16-bit signed integers
        waveform = torch.from_numpy(waveform).unsqueeze(0)
        features = ta_kaldi.fbank(waveform, num_mel_bins=self.num_mel_bins, sample_frequency=self.sampling_rate)
        return features.numpy()
    @staticmethod
    def utterance_cmvn(x, input_length, normalize_means=True, normalize_vars=True, padding_value=0.0, ):
        '''simple docstring'''
        # make sure we normalize float32 arrays
        if normalize_means:
            mean = x[:input_length].mean(axis=0)
            x = np.subtract(x, mean)
        if normalize_vars:
            std = x[:input_length].std(axis=0)
            x = np.divide(x, std)
        if input_length < x.shape[0]:
            x[input_length:] = padding_value
        # make sure array is in float32
        x = x.astype(np.float32)
        return x
    def normalize(self, input_features, attention_mask=None):
        '''simple docstring'''
        lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [
            self.utterance_cmvn(x, n, self.normalize_means, self.normalize_vars, self.padding_value)
            for x, n in zip(input_features, lengths)
        ]
    def __call__(self, raw_speech, padding=False, max_length=None, truncation=False, pad_to_multiple_of=None, return_tensors=None, sampling_rate=None, return_attention_mask=None, **kwargs, ):
        '''simple docstring'''
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f'The model corresponding to this feature extractor: {self} was trained using a sampling rate of'
                    f' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with'
                    f' {self.sampling_rate} and not {sampling_rate}.' )
        else:
            logger.warning(
                'It is strongly recommended to pass the `sampling_rate` argument to this function. '
                'Failing to do so can result in silent errors that might be hard to debug.' )
        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f'Only mono-channel audio is supported for input to {self}')
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]
        # extract fbank features
        features = [self._extract_fbank_features(waveform) for waveform in raw_speech]
        # convert into correct format for padding
        encoded_inputs = BatchFeature({'input_features': features})
        padded_inputs = self.pad(
            encoded_inputs, padding=padding, max_length=max_length, truncation=truncation, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, **kwargs, )
        # make sure list is in array format
        input_features = padded_inputs.get('input_features')
        if isinstance(input_features[0], list):
            padded_inputs['input_features'] = [np.asarray(feature, dtype=np.float32) for feature in input_features]
        attention_mask = padded_inputs.get('attention_mask')
        if attention_mask is not None:
            padded_inputs['attention_mask'] = [np.asarray(array, dtype=np.int32) for array in attention_mask]
        # Utterance-level cepstral mean and variance normalization
        if self.do_ceptral_normalize:
            attention_mask = (
                np.array(attention_mask, dtype=np.int32)
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            padded_inputs['input_features'] = self.normalize(
                padded_inputs['input_features'], attention_mask=attention_mask )
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)
        return padded_inputs
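def _extractor_demo():
    # Hedged sketch: extracts log-mel fbank features from one second of random
    # noise at the extractor's default 16 kHz rate. Assumes torchaudio is
    # installed for the Kaldi fbank computation.
    extractor = Speech2TextFeatureExtractor()
    speech = np.random.randn(16_000).astype(np.float32)
    out = extractor(speech, sampling_rate=16_000, return_tensors="np")
    return out["input_features"].shape  # roughly (1, ~100 frames, 80)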
| 266 | 1 |
def method_a(boundary, steps):
    # "extended trapezoidal rule"
    # int(f) = dx/2 * (f1 + 2f2 + ... + fn)
    h = (boundary[1] - boundary[0]) / steps
    a = boundary[0]
    b = boundary[1]
    x_i = make_points(a, b, h)
    y = 0.0
    y += (h / 2.0) * f(a)
    for i in x_i:
        # print(i)
        y += h * f(i)
    y += (h / 2.0) * f(b)
    return y
def make_points(a, b, h):
    x = a + h
    while x < (b - h):
        yield x
        x = x + h
def f(x):  # enter your function here
    y = (x - 0) * (x - 0)
    return y
def main():
    a = 0.0  # Lower bound of integration
    b = 1.0  # Upper bound of integration
    steps = 10.0  # define number of steps or resolution
    boundary = [a, b]  # define boundary of integration
    y = method_a(boundary, steps)
    print(f"y = {y}")
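def _accuracy_check():
    # Hedged sketch: the exact integral of x^2 over [0, 1] is 1/3, so the
    # trapezoidal estimate from method_a should approach 0.333... as steps grows.
    return method_a([0.0, 1.0], 100.0)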
if __name__ == "__main__":
main()
| 162 |
import numpy
# List of input, output pairs
train_data = (
    ((5, 2, 3), 15),
    ((6, 5, 9), 25),
    ((11, 12, 13), 41),
    ((1, 1, 1), 8),
    ((11, 12, 13), 41),
)
test_data = (((515, 22, 13), 555), ((61, 35, 49), 150))
parameter_vector = [2, 4, 1, 5]
m = len(train_data)
LEARNING_RATE = 0.009
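# The functions below fit h(x) = p0 + p1*x1 + p2*x2 + p3*x3 to train_data by
# batch gradient descent: every parameter is repeatedly updated as
#   p_i <- p_i - LEARNING_RATE * d(cost)/d(p_i)
# until successive parameter vectors agree within a small tolerance.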
def _error(example_no, data_set="train"):
    return calculate_hypothesis_value(example_no, data_set) - output(
        example_no, data_set )
def _hypothesis_value(data_input_tuple):
    hyp_val = 0
    for i in range(len(parameter_vector) - 1):
        hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
    hyp_val += parameter_vector[0]
    return hyp_val
def output(example_no, data_set):
    if data_set == "train":
        return train_data[example_no][1]
    elif data_set == "test":
        return test_data[example_no][1]
    return None
def calculate_hypothesis_value(example_no, data_set):
    if data_set == "train":
        return _hypothesis_value(train_data[example_no][0])
    elif data_set == "test":
        return _hypothesis_value(test_data[example_no][0])
    return None
def summation_of_cost_derivative(index, end=m):
    summation_value = 0
    for i in range(end):
        if index == -1:
            summation_value += _error(i)
        else:
            summation_value += _error(i) * train_data[i][0][index]
    return summation_value
def get_cost_derivative(index):
    cost_derivative_value = summation_of_cost_derivative(index, m) / m
    return cost_derivative_value
def run_gradient_descent():
    global parameter_vector
    # Tune these values to set a tolerance value for predicted output
    absolute_error_limit = 0.000002
    relative_error_limit = 0
    j = 0
    while True:
        j += 1
        temp_parameter_vector = [0, 0, 0, 0]
        for i in range(0, len(parameter_vector)):
            cost_derivative = get_cost_derivative(i - 1)
            temp_parameter_vector[i] = (
                parameter_vector[i] - LEARNING_RATE * cost_derivative
            )
        if numpy.allclose(
            parameter_vector, temp_parameter_vector, atol=absolute_error_limit, rtol=relative_error_limit, ):
            break
        parameter_vector = temp_parameter_vector
    print(("Number of iterations:", j))
def test_gradient_descent():
    for i in range(len(test_data)):
        print(("Actual output value:", output(i, "test")))
        print(("Hypothesis output:", calculate_hypothesis_value(i, "test")))
if __name__ == "__main__":
run_gradient_descent()
print('\nTesting gradient descent for a linear hypothesis function.\n')
test_gradient_descent()
| 162 | 1 |
'''simple docstring'''
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
_Datasets = collections.namedtuple("_Datasets", ["train", "validation", "test"])
# CVDF mirror of http://yann.lecun.com/exdb/mnist/
DEFAULT_SOURCE_URL = "https://storage.googleapis.com/cvdf-datasets/mnist/"
def _readaa(bytestream):
    '''simple docstring'''
    dt = numpy.dtype(numpy.uint32).newbyteorder(">")
    return numpy.frombuffer(bytestream.read(4), dtype=dt)[0]
@deprecated(None, "Please use tf.data to implement this functionality.")
def _extract_images(f):
    '''simple docstring'''
    print("Extracting", f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _readaa(bytestream)
        if magic != 2051:
            raise ValueError(
                "Invalid magic number %d in MNIST image file: %s" % (magic, f.name) )
        num_images = _readaa(bytestream)
        rows = _readaa(bytestream)
        cols = _readaa(bytestream)
        buf = bytestream.read(rows * cols * num_images)
        data = numpy.frombuffer(buf, dtype=numpy.uint8)
        data = data.reshape(num_images, rows, cols, 1)
        return data
@deprecated(None, "Please use tf.one_hot on tensors.")
def _dense_to_one_hot(labels_dense, num_classes):
    '''simple docstring'''
    num_labels = labels_dense.shape[0]
    index_offset = numpy.arange(num_labels) * num_classes
    labels_one_hot = numpy.zeros((num_labels, num_classes))
    labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
    return labels_one_hot
@deprecated(None, "Please use tf.data to implement this functionality.")
def _extract_labels(f, one_hot=False, num_classes=10):
    '''simple docstring'''
    print("Extracting", f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _readaa(bytestream)
        if magic != 2049:
            raise ValueError(
                "Invalid magic number %d in MNIST label file: %s" % (magic, f.name) )
        num_items = _readaa(bytestream)
        buf = bytestream.read(num_items)
        labels = numpy.frombuffer(buf, dtype=numpy.uint8)
        if one_hot:
            return _dense_to_one_hot(labels, num_classes)
        return labels
class _DataSet:
    @deprecated(
        None, "Please use alternatives such as official/mnist/_DataSet.py"
        " from tensorflow/models.", )
    def __init__(self, images, labels, fake_data=False, one_hot=False, dtype=dtypes.float32, reshape=True, seed=None, ):
        """simple docstring"""
        seed1, seed2 = random_seed.get_seed(seed)
        # If op level seed is not set, use whatever graph level seed is returned
        numpy.random.seed(seed1 if seed is None else seed2)
        dtype = dtypes.as_dtype(dtype).base_dtype
        if dtype not in (dtypes.uint8, dtypes.float32):
            raise TypeError("Invalid image dtype %r, expected uint8 or float32" % dtype)
        if fake_data:
            self._num_examples = 10000
            self.one_hot = one_hot
        else:
            assert (
                images.shape[0] == labels.shape[0]
            ), f"images.shape: {images.shape} labels.shape: {labels.shape}"
            self._num_examples = images.shape[0]
            # Convert shape from [num examples, rows, columns, depth]
            # to [num examples, rows*columns] (assuming depth == 1)
            if reshape:
                assert images.shape[3] == 1
                images = images.reshape(
                    images.shape[0], images.shape[1] * images.shape[2] )
            if dtype == dtypes.float32:
                # Convert from [0, 255] -> [0.0, 1.0].
                images = images.astype(numpy.float32)
                images = numpy.multiply(images, 1.0 / 255.0)
        self._images = images
        self._labels = labels
        self._epochs_completed = 0
        self._index_in_epoch = 0
    @property
    def images(self):
        """simple docstring"""
        return self._images
    @property
    def labels(self):
        """simple docstring"""
        return self._labels
    @property
    def num_examples(self):
        """simple docstring"""
        return self._num_examples
    @property
    def epochs_completed(self):
        """simple docstring"""
        return self._epochs_completed
    def next_batch(self, batch_size, fake_data=False, shuffle=True):
        """simple docstring"""
        if fake_data:
            fake_image = [1] * 784
            fake_label = [1] + [0] * 9 if self.one_hot else 0
            return (
                [fake_image for _ in range(batch_size)],
                [fake_label for _ in range(batch_size)],
            )
        start = self._index_in_epoch
        # Shuffle for the first epoch
        if self._epochs_completed == 0 and start == 0 and shuffle:
            perma = numpy.arange(self._num_examples)
            numpy.random.shuffle(perma)
            self._images = self.images[perma]
            self._labels = self.labels[perma]
        # Go to the next epoch
        if start + batch_size > self._num_examples:
            # Finished epoch
            self._epochs_completed += 1
            # Get the rest examples in this epoch
            rest_num_examples = self._num_examples - start
            images_rest_part = self._images[start : self._num_examples]
            labels_rest_part = self._labels[start : self._num_examples]
            # Shuffle the data
            if shuffle:
                perm = numpy.arange(self._num_examples)
                numpy.random.shuffle(perm)
                self._images = self.images[perm]
                self._labels = self.labels[perm]
            # Start next epoch
            start = 0
            self._index_in_epoch = batch_size - rest_num_examples
            end = self._index_in_epoch
            images_new_part = self._images[start:end]
            labels_new_part = self._labels[start:end]
            return (
                numpy.concatenate((images_rest_part, images_new_part), axis=0),
                numpy.concatenate((labels_rest_part, labels_new_part), axis=0),
            )
        else:
            self._index_in_epoch += batch_size
            end = self._index_in_epoch
            return self._images[start:end], self._labels[start:end]
@deprecated(None, "Please write your own downloading logic.")
def _maybe_download(filename, work_directory, source_url):
    '''simple docstring'''
    if not gfile.Exists(work_directory):
        gfile.MakeDirs(work_directory)
    filepath = os.path.join(work_directory, filename)
    if not gfile.Exists(filepath):
        urllib.request.urlretrieve(source_url, filepath)  # noqa: S310
        with gfile.GFile(filepath) as f:
            size = f.size()
        print("Successfully downloaded", filename, size, "bytes.")
    return filepath
@deprecated(
    None, "Please use alternatives such as:" " tensorflow_datasets.load('mnist')" )
def read_data_sets(train_dir, fake_data=False, one_hot=False, dtype=dtypes.float32, reshape=True, validation_size=5000, seed=None, source_url=DEFAULT_SOURCE_URL, ):
    '''simple docstring'''
    if fake_data:
        def fake():
            return _DataSet(
                [], [], fake_data=True, one_hot=one_hot, dtype=dtype, seed=seed )
        train = fake()
        validation = fake()
        test = fake()
        return _Datasets(train=train, validation=validation, test=test)
    if not source_url:  # empty string check
        source_url = DEFAULT_SOURCE_URL
    train_images_file = "train-images-idx3-ubyte.gz"
    train_labels_file = "train-labels-idx1-ubyte.gz"
    test_images_file = "t10k-images-idx3-ubyte.gz"
    test_labels_file = "t10k-labels-idx1-ubyte.gz"
    local_file = _maybe_download(
        train_images_file, train_dir, source_url + train_images_file )
    with gfile.Open(local_file, "rb") as f:
        train_images = _extract_images(f)
    local_file = _maybe_download(
        train_labels_file, train_dir, source_url + train_labels_file )
    with gfile.Open(local_file, "rb") as f:
        train_labels = _extract_labels(f, one_hot=one_hot)
    local_file = _maybe_download(
        test_images_file, train_dir, source_url + test_images_file )
    with gfile.Open(local_file, "rb") as f:
        test_images = _extract_images(f)
    local_file = _maybe_download(
        test_labels_file, train_dir, source_url + test_labels_file )
    with gfile.Open(local_file, "rb") as f:
        test_labels = _extract_labels(f, one_hot=one_hot)
    if not 0 <= validation_size <= len(train_images):
        msg = (
            "Validation size should be between 0 and "
            f"{len(train_images)}. Received: {validation_size}."
        )
        raise ValueError(msg)
    validation_images = train_images[:validation_size]
    validation_labels = train_labels[:validation_size]
    train_images = train_images[validation_size:]
    train_labels = train_labels[validation_size:]
    options = {"dtype": dtype, "reshape": reshape, "seed": seed}
    train = _DataSet(train_images, train_labels, **options)
    validation = _DataSet(validation_images, validation_labels, **options)
    test = _DataSet(test_images, test_labels, **options)
    return _Datasets(train=train, validation=validation, test=test)
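def _mnist_demo():
    # Hedged sketch (network access required): downloads MNIST into a local
    # directory and pulls a single mini-batch. The directory name is arbitrary.
    data = read_data_sets("/tmp/mnist_data", one_hot=True)
    images, labels = data.train.next_batch(32)
    return images.shape, labels.shape  # ((32, 784), (32, 10))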
| 141 |
'''simple docstring'''
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("TEST_SAGEMAKER" , "False" ) ) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , )
@pytest.mark.usefixtures("sm_env" )
@parameterized_class(
[
{
"framework": "pytorch",
"script": "run_glue.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.p3.16xlarge",
"results": {"train_runtime": 6_5_0, "eval_accuracy": 0.7, "eval_loss": 0.6},
},
{
"framework": "pytorch",
"script": "run_ddp.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.p3.16xlarge",
"results": {"train_runtime": 6_0_0, "eval_accuracy": 0.7, "eval_loss": 0.6},
},
{
"framework": "tensorflow",
"script": "run_tf_dist.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.p3.16xlarge",
"results": {"train_runtime": 6_0_0, "eval_accuracy": 0.6, "eval_loss": 0.7},
},
] )
class lowerCAmelCase ( unittest.TestCase ):
    def setUp(self):
"""simple docstring"""
if self.framework == "pytorch":
subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(), encoding="utf-8", check=True, )
assert hasattr(self , 'env' )
    def create_estimator(self, instance_count):
"""simple docstring"""
        job_name = f"{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}"
# distributed data settings
        distribution = {"smdistributed": {"dataparallel": {"enabled": True}}} if self.script != "run_ddp.py" else None
# creates estimator
return HuggingFace(
            entry_point=self.script, source_dir=self.env.test_path, role=self.env.role, image_uri=self.env.image_uri, base_job_name=job_name, instance_count=instance_count, instance_type=self.instance_type, debugger_hook_config=False, hyperparameters={**self.env.distributed_hyperparameters, "model_name_or_path": self.model_name_or_path}, metric_definitions=self.env.metric_definitions, distribution=distribution, py_version="py36", )
    def save_results_as_csv(self, job_name):
"""simple docstring"""
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")
@parameterized.expand([(2,)] )
    def test_script(self, instance_count):
"""simple docstring"""
        estimator = self.create_estimator(instance_count)
# run training
estimator.fit()
# result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()
# extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
# get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
Session().describe_training_job(estimator.latest_training_job.name ).get('TrainingTimeInSeconds' , 999999 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results['eval_accuracy'] for t in eval_accuracy )
assert all(t <= self.results['eval_loss'] for t in eval_loss )
# dump tests result into json file to share in PR
with open(f'''{estimator.latest_training_job.name}.json''' , 'w' ) as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
| 141 | 1 |
"""simple docstring"""
def check_cycle(graph: dict) -> bool:
    """simple docstring"""
    visited = set()
    # To detect a back edge, keep track of vertices currently in the recursion stack
    rec_stk = set()
    return any(
        node not in visited and depth_first_search(graph, node, visited, rec_stk)
        for node in graph )
def depth_first_search(graph: dict, vertex: int, visited: set, rec_stk: set) -> bool:
    """simple docstring"""
    visited.add(vertex)
    rec_stk.add(vertex)
    for node in graph[vertex]:
        if node not in visited:
            if depth_first_search(graph, node, visited, rec_stk):
                return True
        elif node in rec_stk:
            return True
    # The node needs to be removed from recursion stack before function ends
    rec_stk.remove(vertex)
    return False
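def _cycle_demo():
    # Hedged sketch: the back edge 2 -> 0 closes a cycle in the first graph,
    # while the second graph is a simple chain with no back edge.
    cyclic = {0: [1], 1: [2], 2: [0]}
    acyclic = {0: [1], 1: [2], 2: []}
    return check_cycle(cyclic), check_cycle(acyclic)  # (True, False)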
if __name__ == "__main__":
from doctest import testmod
testmod()
| 357 |
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class CLIPTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CLIPTokenizer
    rust_tokenizer_class = CLIPTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {}
    test_seq2seq = False
    def setUp(self):
super().setUp()
# fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>"]
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = CLIPTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["lo", "w", "er</w>", "n", "e", "w", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [10, 2, 16, 9, 3, 2, 16, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
@require_ftfy
    def test_check_encoding_slow_fast(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_s = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                text = "A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d."
                tokens_s = tokenizer_s.tokenize(text)
                tokens_r = tokenizer_r.tokenize(text)
                self.assertListEqual(tokens_s, tokens_r)
# Test that the tokenization is identical on an example containing a character (Latin Small Letter A
# with Tilde) encoded in 2 different ways
                text = "xa\u0303y" + " " + "x\xe3y"
                tokens_s = tokenizer_s.tokenize(text)
                tokens_r = tokenizer_r.tokenize(text)
                self.assertListEqual(tokens_s, tokens_r)
# Test that the tokenization is identical on unicode of space type
                spaces_unicodes = [
'''\u0009''', # (horizontal tab, '\t')
'''\u000B''', # (vertical tab)
'''\u000C''', # (form feed)
'''\u0020''', # (space, ' ')
'''\u200E''', # (left-to-right mark):w
'''\u200F''', # (right-to-left mark)
]
for unicode_seq in spaces_unicodes:
                    tokens_s = tokenizer_s.tokenize(unicode_seq)
                    tokens_r = tokenizer_r.tokenize(unicode_seq)
                    self.assertListEqual(tokens_s, tokens_r)
# Test that the tokenization is identical on unicode of line break type
                line_break_unicodes = [
'''\u000A''', # (line feed, '\n')
'''\r\n''', # (carriage return and line feed, '\r\n')
'''\u000D''', # (carriage return, '\r')
'''\r''', # (carriage return, '\r')
'''\u000D''', # (carriage return, '\r')
'''\u2028''', # (line separator)
'''\u2029''', # (paragraph separator)
# "\u0085", # (next line)
]
# The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
# it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
# space (and thus into an empty list).
for unicode_seq in line_break_unicodes:
                    tokens_s = tokenizer_s.tokenize(unicode_seq)
                    tokens_r = tokenizer_r.tokenize(unicode_seq)
                    self.assertListEqual(tokens_s, tokens_r)
    def test_offsets_mapping_with_add_prefix_space(self):
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
                text_of_1_token = "hello"  # `hello` is a token in the vocabulary of `pretrained_name`
                text = f"{text_of_1_token} {text_of_1_token}"
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1], (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)), )
                text = f" {text}"
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1], (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)), )
    def test_loading_old_tokenizer_raises(self):
# Test related to the breaking change introduced in transformers v4.17.0
# We need to check that an error in raised when the user try to load a previous version of the tokenizer.
        with self.assertRaises(ValueError) as context:
self.rust_tokenizer_class.from_pretrained('''robot-test/old-clip-tokenizer''' )
self.assertTrue(
context.exception.args[0].startswith(
'''The `backend_tokenizer` provided does not match the expected format.''' ) )
@require_ftfy
    def test_tokenization_python_rust_equals(self):
super().test_tokenization_python_rust_equals()
    def test_added_tokens_do_lower_case(self):
# CLIP always lower cases letters
pass
| 278 | 0 |
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class DonutProcessor(ProcessorMixin):
    '''simple docstring'''
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
'''simple docstring'''
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.", FutureWarning, )
            feature_extractor = kwargs.pop("feature_extractor")
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
        self._in_target_context_manager = False
    def __call__(self, *args, **kwargs):
        '''simple docstring'''
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)
        images = kwargs.pop("images", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            images = args[0]
            args = args[1:]
        if images is None and text is None:
            raise ValueError("You need to specify either an `images` or `text` input to process.")
        if images is not None:
            inputs = self.image_processor(images, *args, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)
        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs
    def batch_decode(self, *args, **kwargs):
        '''simple docstring'''
        return self.tokenizer.batch_decode(*args, **kwargs)
    def decode(self, *args, **kwargs):
        '''simple docstring'''
        return self.tokenizer.decode(*args, **kwargs)
@contextmanager
    def as_target_processor(self):
        '''simple docstring'''
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your images inputs, or in a separate call." )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.image_processor
        self._in_target_context_manager = False
    def tokenajson(self, tokens, is_inner_value=False, added_vocab=None):
        '''simple docstring'''
        if added_vocab is None:
            added_vocab = self.tokenizer.get_added_vocab()
        output = {}
        while tokens:
            start_token = re.search(r"<s_(.*?)>", tokens, re.IGNORECASE)
            if start_token is None:
                break
            key = start_token.group(1)
            end_token = re.search(rf"</s_{key}>", tokens, re.IGNORECASE)
            start_token = start_token.group()
            if end_token is None:
                tokens = tokens.replace(start_token, "")
            else:
                end_token = end_token.group()
                start_token_escaped = re.escape(start_token)
                end_token_escaped = re.escape(end_token)
                content = re.search(f"{start_token_escaped}(.*?){end_token_escaped}", tokens, re.IGNORECASE)
                if content is not None:
                    content = content.group(1).strip()
                    if r"<s_" in content and r"</s_" in content:  # non-leaf node
                        value = self.tokenajson(content, is_inner_value=True, added_vocab=added_vocab)
                        if value:
                            if len(value) == 1:
                                value = value[0]
                            output[key] = value
                    else:  # leaf nodes
                        output[key] = []
                        for leaf in content.split(r"<sep/>"):
                            leaf = leaf.strip()
                            if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
                                leaf = leaf[1:-2]  # for categorical special tokens
                            output[key].append(leaf)
                        if len(output[key]) == 1:
                            output[key] = output[key][0]
                tokens = tokens[tokens.find(end_token) + len(end_token) :].strip()
                if tokens[:6] == r"<sep/>":  # non-leaf nodes
                    return [output] + self.tokenajson(tokens[6:], is_inner_value=True, added_vocab=added_vocab)
        if len(output):
            return [output] if is_inner_value else output
        else:
            return [] if is_inner_value else {"text_sequence": tokens}
    @property
    def feature_extractor_class(self):
        '''simple docstring'''
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.", FutureWarning, )
        return self.image_processor_class
    @property
    def feature_extractor(self):
        '''simple docstring'''
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.", FutureWarning, )
        return self.image_processor
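def _tokenajson_demo(processor):
    # Hedged sketch: turns a Donut-style tag sequence into nested JSON via the
    # tokenajson method above; the tag names here are made up.
    seq = "<s_menu><s_name>latte</s_name><s_price>4.50</s_price></s_menu>"
    return processor.tokenajson(seq)
    # -> {"menu": {"name": "latte", "price": "4.50"}}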
| 296 |
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class TextToVideoSDPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
'''simple docstring'''
    pipeline_class = TextToVideoSDPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    # No `output_type`.
    required_optional_params = frozenset(
[
"num_inference_steps",
"generator",
"latents",
"return_dict",
"callback",
"callback_steps",
] )
    def get_dummy_components(self):
'''simple docstring'''
torch.manual_seed(0 )
        unet = UNetaDConditionModel(
block_out_channels=(32, 64, 64, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=("""CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """DownBlock3D""") ,up_block_types=("""UpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D""") ,cross_attention_dim=32 ,attention_head_dim=4 ,)
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, )
torch.manual_seed(0 )
        vae = AutoencoderKL(
block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] ,up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] ,latent_channels=4 ,sample_size=128 ,)
torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1e-0_5 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1000 ,hidden_act="""gelu""" ,projection_dim=512 ,)
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
}
return components
    def get_dummy_inputs(self, device, seed=0):
        '''simple docstring'''
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """pt""",
}
return inputs
    def test_text_to_video_default_case(self):
        '''simple docstring'''
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = TextToVideoSDPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        inputs["output_type"] = "np"
        frames = sd_pipe(**inputs).frames
        image_slice = frames[0][-3:, -3:, -1]
        assert frames[0].shape == (64, 64, 3)
        expected_slice = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_attention_slicing_forward_pass(self):
'''simple docstring'''
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=lowerCamelCase__ ,expected_max_diff=3e-3 )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() ,reason="""XFormers attention is only available with CUDA and `xformers` installed""" ,)
    def test_xformers_attention_forwardGenerator_pass(self):
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=lowerCamelCase__ ,expected_max_diff=1e-2 )
@unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" )
    def test_inference_batch_consistent(self):
'''simple docstring'''
pass
@unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" )
    def test_inference_batch_single_identical(self):
'''simple docstring'''
pass
@unittest.skip(reason="""`num_images_per_prompt` argument is not supported for this pipeline.""" )
    def test_num_images_per_prompt(self):
'''simple docstring'''
pass
    def test_progress_bar(self):
'''simple docstring'''
return super().test_progress_bar()
@slow
@skip_mps
class TextToVideoSDPipelineSlowTests(unittest.TestCase):
'''simple docstring'''
    def test_full_model(self):
        '''simple docstring'''
        expected_video = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy" )
        pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe = pipe.to("cuda")
        prompt = "Spiderman is surfing"
        generator = torch.Generator(device="cpu").manual_seed(0)
        video_frames = pipe(prompt, generator=generator, num_inference_steps=25, output_type="pt").frames
        video = video_frames.cpu().numpy()
        assert np.abs(expected_video - video).mean() < 5e-2
    def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Any:
        '''simple docstring'''
        expected_video = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy""" )
        pipe = TextToVideoSDPipeline.from_pretrained("""damo-vilab/text-to-video-ms-1.7b""" )
        pipe = pipe.to("""cuda""" )
        prompt = """Spiderman is surfing"""
        generator = torch.Generator(device="""cpu""" ).manual_seed(0 )
        video_frames = pipe(prompt ,generator=generator ,num_inference_steps=2 ,output_type="""pt""" ).frames
        video = video_frames.cpu().numpy()
        assert np.abs(expected_video - video ).mean() < 5e-2
| 296 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
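# Lazy import structure: the SentencePiece-based tokenizer is only registered when
# sentencepiece is installed; otherwise the module exposes nothing.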
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_gpt_sw3"] = ["GPTSw3Tokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_gpt_sw3 import GPTSw3Tokenizer
else:
import sys
lowerCamelCase : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 114 |
'''simple docstring'''
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
global_rng = random.Random()
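# Module-level RNG shared by the helpers below so the dummy audio inputs are reproducible.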
def floats_list ( shape : Optional[Any] , scale : Any=1.0 , rng : Optional[int]=None , name : Dict=None ) -> Any:
    """simple docstring"""
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0] ):
        values.append([] )
        for _ in range(shape[1] ):
            values[-1].append(rng.random() * scale )
    return values
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTester ( unittest.TestCase ):
    def __init__( self : List[Any] , parent : Tuple , batch_size : Dict=7 , min_seq_length : List[Any]=400 , max_seq_length : List[str]=2000 , feature_size : Optional[Any]=10 , hop_length : Dict=160 , chunk_length : Tuple=8 , padding_value : Any=0.0 , sampling_rate : Optional[Any]=4000 , return_attention_mask : List[Any]=False , do_normalize : Dict=True , ) -> Union[str, Any]:
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
        self.feature_size = feature_size
        self.chunk_length = chunk_length
        self.hop_length = hop_length
    def prepare_feat_extract_dict( self : Any ) -> Optional[Any]:
'''simple docstring'''
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
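    # Builds batches of synthetic "audio" (nested lists of floats) of equal or increasing
    # length, optionally converted to numpy arrays, for the tests below.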
    def prepare_inputs_for_common( self : Optional[Any] , equal_length : Any=False , numpify : Union[str, Any]=False ) -> Optional[Any]:
        '''simple docstring'''
        def _flatten(list_of_lists : Union[str, Any] ):
            return list(itertools.chain(*list_of_lists ) )
        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size) )
                for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
            ]
        if numpify:
            speech_inputs = [np.asarray(x ) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTest ( SequenceFeatureExtractionTestMixin , unittest.TestCase ):
    feature_extraction_class = WhisperFeatureExtractor if is_speech_available() else None
    def setUp( self : Union[str, Any] ) -> Any:
        '''simple docstring'''
        self.feat_extract_tester = WhisperFeatureExtractionTester(self )
def A ( self : str ) -> Tuple:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_SCREAMING_SNAKE_CASE =feat_extract_first.save_pretrained(_a )[0]
check_json_file_has_correct_format(_a )
_SCREAMING_SNAKE_CASE =self.feature_extraction_class.from_pretrained(_a )
_SCREAMING_SNAKE_CASE =feat_extract_first.to_dict()
_SCREAMING_SNAKE_CASE =feat_extract_second.to_dict()
_SCREAMING_SNAKE_CASE =feat_extract_first.mel_filters
_SCREAMING_SNAKE_CASE =feat_extract_second.mel_filters
self.assertTrue(np.allclose(_a , _a ) )
self.assertEqual(_a , _a )
def A ( self : Tuple ) -> Any:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_SCREAMING_SNAKE_CASE =os.path.join(_a , 'feat_extract.json' )
feat_extract_first.to_json_file(_a )
_SCREAMING_SNAKE_CASE =self.feature_extraction_class.from_json_file(_a )
_SCREAMING_SNAKE_CASE =feat_extract_first.to_dict()
_SCREAMING_SNAKE_CASE =feat_extract_second.to_dict()
_SCREAMING_SNAKE_CASE =feat_extract_first.mel_filters
_SCREAMING_SNAKE_CASE =feat_extract_second.mel_filters
self.assertTrue(np.allclose(_a , _a ) )
self.assertEqual(_a , _a )
def A ( self : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
_SCREAMING_SNAKE_CASE =[floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
_SCREAMING_SNAKE_CASE =[np.asarray(_a ) for speech_input in speech_inputs]
# Test feature size
_SCREAMING_SNAKE_CASE =feature_extractor(_a , padding='max_length' , return_tensors='np' ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames )
self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size )
# Test not batched input
_SCREAMING_SNAKE_CASE =feature_extractor(speech_inputs[0] , return_tensors='np' ).input_features
_SCREAMING_SNAKE_CASE =feature_extractor(np_speech_inputs[0] , return_tensors='np' ).input_features
self.assertTrue(np.allclose(_a , _a , atol=1e-3 ) )
# Test batched
_SCREAMING_SNAKE_CASE =feature_extractor(_a , return_tensors='np' ).input_features
_SCREAMING_SNAKE_CASE =feature_extractor(_a , return_tensors='np' ).input_features
for enc_seq_a, enc_seq_a in zip(_a , _a ):
self.assertTrue(np.allclose(_a , _a , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
_SCREAMING_SNAKE_CASE =[floats_list((1, x) )[0] for x in (800, 800, 800)]
_SCREAMING_SNAKE_CASE =np.asarray(_a )
_SCREAMING_SNAKE_CASE =feature_extractor(_a , return_tensors='np' ).input_features
_SCREAMING_SNAKE_CASE =feature_extractor(_a , return_tensors='np' ).input_features
for enc_seq_a, enc_seq_a in zip(_a , _a ):
self.assertTrue(np.allclose(_a , _a , atol=1e-3 ) )
# Test truncation required
_SCREAMING_SNAKE_CASE =[floats_list((1, x) )[0] for x in range(200 , (feature_extractor.n_samples + 500) , 200 )]
_SCREAMING_SNAKE_CASE =[np.asarray(_a ) for speech_input in speech_inputs]
_SCREAMING_SNAKE_CASE =[x[: feature_extractor.n_samples] for x in speech_inputs]
_SCREAMING_SNAKE_CASE =[np.asarray(_a ) for speech_input in speech_inputs_truncated]
_SCREAMING_SNAKE_CASE =feature_extractor(_a , return_tensors='np' ).input_features
_SCREAMING_SNAKE_CASE =feature_extractor(_a , return_tensors='np' ).input_features
for enc_seq_a, enc_seq_a in zip(_a , _a ):
self.assertTrue(np.allclose(_a , _a , atol=1e-3 ) )
def A ( self : Any ) -> List[Any]:
'''simple docstring'''
import torch
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        np_speech_inputs = np.random.rand(100 , 32 ).astype(np.float64 )
        py_speech_inputs = np_speech_inputs.tolist()
        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{'input_features': inputs}] , return_tensors='np' )
            self.assertTrue(np_processed.input_features.dtype == np.float32 )
            pt_processed = feature_extractor.pad([{'input_features': inputs}] , return_tensors='pt' )
            self.assertTrue(pt_processed.input_features.dtype == torch.float32 )
    def _load_datasamples( self : Tuple , num_samples : int ) -> Optional[Any]:
        '''simple docstring'''
        ds = load_dataset('hf-internal-testing/librispeech_asr_dummy' , 'clean' , split='validation' )
        # automatic decoding with librispeech
        speech_samples = ds.sort('id' ).select(range(num_samples ) )[:num_samples]['audio']
        return [x["array"] for x in speech_samples]
def A ( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
        # fmt: off
        EXPECTED_INPUT_FEATURES = torch.tensor(
            [
                0.11_93, -0.09_46, -0.10_98, -0.01_96, 0.02_25, -0.06_90, -0.17_36, 0.09_51,
                0.09_71, -0.08_17, -0.07_02, 0.01_62, 0.02_60, 0.00_17, -0.01_92, -0.16_78,
                0.07_09, -0.18_67, -0.06_55, -0.02_74, -0.02_34, -0.18_84, -0.05_16, -0.05_54,
                -0.02_74, -0.14_25, -0.14_23, 0.08_37, 0.03_77, -0.08_54
            ] )
        # fmt: on
        input_speech = self._load_datasamples(1 )
        feature_extractor = WhisperFeatureExtractor()
        input_features = feature_extractor(input_speech , return_tensors='pt' ).input_features
        self.assertEqual(input_features.shape , (1, 80, 3000) )
        self.assertTrue(torch.allclose(input_features[0, 0, :30] , EXPECTED_INPUT_FEATURES , atol=1e-4 ) )
    def A ( self : Union[str, Any] ) -> Tuple:
        '''simple docstring'''
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        audio = self._load_datasamples(1 )[0]
        audio = ((audio - audio.min()) / (audio.max() - audio.min())) * 6_5535  # Rescale to [0, 65535] to show issue
        audio = feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=None )[0]
        self.assertTrue(np.all(np.mean(audio ) < 1e-3 ) )
        self.assertTrue(np.all(np.abs(np.var(audio ) - 1 ) < 1e-3 ) )
| 114 | 1 |
"""simple docstring"""
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
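# The streamer tests below compare text streamed token-by-token against the output of a
# plain generate() call on the same tiny GPT-2 checkpoint.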
@require_torch
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
    def __lowerCAmelCase ( self : List[str] ):
        tokenizer = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
        model = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(torch_device )
        model.config.eos_token_id = -1
        input_ids = ids_tensor((1, 5) ,vocab_size=model.config.vocab_size ).to(torch_device )
        greedy_ids = model.generate(input_ids ,max_new_tokens=1_0 ,do_sample=False )
        greedy_text = tokenizer.decode(greedy_ids[0] )
        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer )
            model.generate(input_ids ,max_new_tokens=1_0 ,do_sample=False ,streamer=streamer )
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]
        self.assertEqual(streamer_text ,greedy_text )
    def __lowerCAmelCase ( self : Dict ):
        tokenizer = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
        model = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(torch_device )
        model.config.eos_token_id = -1
        input_ids = ids_tensor((1, 5) ,vocab_size=model.config.vocab_size ).to(torch_device )
        greedy_ids = model.generate(input_ids ,max_new_tokens=1_0 ,do_sample=False )
        greedy_text = tokenizer.decode(greedy_ids[0] )
        streamer = TextIteratorStreamer(tokenizer )
        generation_kwargs = {'''input_ids''': input_ids, '''max_new_tokens''': 1_0, '''do_sample''': False, '''streamer''': streamer}
        thread = Thread(target=model.generate ,kwargs=generation_kwargs )
        thread.start()
        streamer_text = ''''''
        for new_text in streamer:
            streamer_text += new_text
        self.assertEqual(streamer_text ,greedy_text )
def __lowerCAmelCase ( self : str ):
lowerCAmelCase__ : Dict = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
lowerCAmelCase__ : str = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(__lowerCAmelCase )
lowerCAmelCase__ : Tuple = -1
lowerCAmelCase__ : Optional[Any] = ids_tensor((1, 5) ,vocab_size=model.config.vocab_size ).to(__lowerCAmelCase )
lowerCAmelCase__ : int = model.generate(__lowerCAmelCase ,max_new_tokens=1_0 ,do_sample=__lowerCAmelCase )
lowerCAmelCase__ : Optional[Any] = greedy_ids[:, input_ids.shape[1] :]
lowerCAmelCase__ : int = tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
lowerCAmelCase__ : Any = TextStreamer(__lowerCAmelCase ,skip_prompt=__lowerCAmelCase )
model.generate(__lowerCAmelCase ,max_new_tokens=1_0 ,do_sample=__lowerCAmelCase ,streamer=__lowerCAmelCase )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
lowerCAmelCase__ : Union[str, Any] = cs.out[:-1]
self.assertEqual(__lowerCAmelCase ,__lowerCAmelCase )
def __lowerCAmelCase ( self : int ):
lowerCAmelCase__ : Optional[int] = AutoTokenizer.from_pretrained('''distilgpt2''' )
lowerCAmelCase__ : Optional[Any] = AutoModelForCausalLM.from_pretrained('''distilgpt2''' ).to(__lowerCAmelCase )
lowerCAmelCase__ : str = -1
lowerCAmelCase__ : Any = torch.ones((1, 5) ,device=__lowerCAmelCase ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
lowerCAmelCase__ : List[Any] = TextStreamer(__lowerCAmelCase ,skip_special_tokens=__lowerCAmelCase )
model.generate(__lowerCAmelCase ,max_new_tokens=1 ,do_sample=__lowerCAmelCase ,streamer=__lowerCAmelCase )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
lowerCAmelCase__ : Any = cs.out[:-1] # Remove the final "\n"
lowerCAmelCase__ : int = tokenizer(__lowerCAmelCase ,return_tensors='''pt''' )
self.assertEqual(streamer_text_tokenized.input_ids.shape ,(1, 1) )
    def __lowerCAmelCase ( self : Tuple ):
        tokenizer = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
        model = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(torch_device )
        model.config.eos_token_id = -1
        input_ids = ids_tensor((1, 5) ,vocab_size=model.config.vocab_size ).to(torch_device )
        streamer = TextIteratorStreamer(tokenizer ,timeout=0.001 )
        generation_kwargs = {'''input_ids''': input_ids, '''max_new_tokens''': 1_0, '''do_sample''': False, '''streamer''': streamer}
        thread = Thread(target=model.generate ,kwargs=generation_kwargs )
        thread.start()
        # The streamer will timeout after 0.001 seconds, so an exception will be raised
        with self.assertRaises(Empty ):
            streamer_text = ''''''
            for new_text in streamer:
                streamer_text += new_text
| 106 |
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
# TODO: upload to AWS
lowerCAmelCase__ = {
'''yjernite/retribert-base-uncased''': (
'''https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json'''
),
}
class __snake_case ( _lowercase):
snake_case__ : int = "retribert"
def __init__( self : Optional[int] , __lowerCAmelCase : str=3_0_5_2_2 , __lowerCAmelCase : Tuple=7_6_8 , __lowerCAmelCase : Union[str, Any]=8 , __lowerCAmelCase : Any=1_2 , __lowerCAmelCase : Optional[int]=3_0_7_2 , __lowerCAmelCase : List[str]="gelu" , __lowerCAmelCase : Tuple=0.1 , __lowerCAmelCase : Tuple=0.1 , __lowerCAmelCase : Tuple=5_1_2 , __lowerCAmelCase : List[str]=2 , __lowerCAmelCase : Tuple=0.02 , __lowerCAmelCase : Optional[Any]=1E-12 , __lowerCAmelCase : Tuple=True , __lowerCAmelCase : Any=1_2_8 , __lowerCAmelCase : Optional[int]=0 , **__lowerCAmelCase : str , ):
"""simple docstring"""
super().__init__(pad_token_id=__lowerCAmelCase , **__lowerCAmelCase )
_lowerCamelCase : Dict = vocab_size
_lowerCamelCase : Union[str, Any] = hidden_size
_lowerCamelCase : Dict = num_hidden_layers
_lowerCamelCase : int = num_attention_heads
_lowerCamelCase : int = hidden_act
_lowerCamelCase : str = intermediate_size
_lowerCamelCase : Union[str, Any] = hidden_dropout_prob
_lowerCamelCase : List[Any] = attention_probs_dropout_prob
_lowerCamelCase : Optional[int] = max_position_embeddings
_lowerCamelCase : List[Any] = type_vocab_size
_lowerCamelCase : Any = initializer_range
_lowerCamelCase : Optional[int] = layer_norm_eps
_lowerCamelCase : int = share_encoders
_lowerCamelCase : Optional[Any] = projection_dim
| 72 | 0 |
'''simple docstring'''
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
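# TimmBackbone wraps a timm model behind the transformers backbone API; the tester below
# builds a tiny resnet config, and the test suite skips features timm backbones don't implement.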
class TimmBackboneModelTester :
    def __init__( self , parent , out_indices=None , stage_names=None , out_features=None , backbone="resnet50" , batch_size=3 , image_size=3_2 , num_channels=3 , use_pretrained_backbone=True , is_training=True , ) -> str:
        '''simple docstring'''
        self.parent = parent
        self.out_indices = out_indices if out_indices is not None else [4]
        self.stage_names = stage_names
        self.out_features = out_features
        self.backbone = backbone
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.use_pretrained_backbone = use_pretrained_backbone
        self.is_training = is_training
    def prepare_config_and_inputs( self) -> Optional[Any]:
        '''simple docstring'''
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values
    def get_config( self) -> Union[str, Any]:
'''simple docstring'''
return TimmBackboneConfig(
image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , )
    def create_and_check_model( self , config , pixel_values) -> Union[str, Any]:
        '''simple docstring'''
        model = TimmBackbone(config=config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            result = model(pixel_values)
        self.parent.assertEqual(
            result.feature_maps[-1].shape , (self.batch_size, model.channels[-1], 1_4, 1_4) , )
    def prepare_config_and_inputs_for_common( self) -> int:
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
@require_timm
class __magic_name__ ( lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase ,unittest.TestCase ):
    all_model_classes = (TimmBackbone,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": TimmBackbone} if is_torch_available() else {}
    test_resize_embeddings = False
    test_head_masking = False
    test_pruning = False
    has_attentions = False
    def setUp( self) -> Union[str, Any]:
        '''simple docstring'''
        self.model_tester = TimmBackboneModelTester(self)
        self.config_tester = ConfigTester(self , config_class=PretrainedConfig , has_text_modality=False)
def lowerCAmelCase ( self) -> str:
'''simple docstring'''
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowerCAmelCase ( self) -> Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : int ='resnet18'
_UpperCAmelCase : Optional[Any] ='microsoft/resnet-18'
_UpperCAmelCase : List[str] =AutoBackbone.from_pretrained(snake_case , use_timm_backbone=snake_case)
_UpperCAmelCase : str =AutoBackbone.from_pretrained(snake_case)
self.assertEqual(len(timm_model.out_features) , len(transformers_model.out_features))
self.assertEqual(len(timm_model.stage_names) , len(transformers_model.stage_names))
self.assertEqual(timm_model.channels , transformers_model.channels)
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices , (-1,))
self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names) - 1])
_UpperCAmelCase : str =AutoBackbone.from_pretrained(snake_case , use_timm_backbone=snake_case , out_indices=[1, 2, 3])
_UpperCAmelCase : int =AutoBackbone.from_pretrained(snake_case , out_indices=[1, 2, 3])
self.assertEqual(timm_model.out_indices , transformers_model.out_indices)
self.assertEqual(len(timm_model.out_features) , len(transformers_model.out_features))
self.assertEqual(timm_model.channels , transformers_model.channels)
@unittest.skip('TimmBackbone doesn\'t support feed forward chunking')
def lowerCAmelCase ( self) -> Dict:
'''simple docstring'''
pass
@unittest.skip('TimmBackbone doesn\'t have num_hidden_layers attribute')
def lowerCAmelCase ( self) -> Optional[Any]:
'''simple docstring'''
pass
@unittest.skip('TimmBackbone initialization is managed on the timm side')
def lowerCAmelCase ( self) -> str:
'''simple docstring'''
pass
@unittest.skip('TimmBackbone models doesn\'t have inputs_embeds')
def lowerCAmelCase ( self) -> str:
'''simple docstring'''
pass
@unittest.skip('TimmBackbone models doesn\'t have inputs_embeds')
def lowerCAmelCase ( self) -> List[str]:
'''simple docstring'''
pass
@unittest.skip('TimmBackbone model cannot be created without specifying a backbone checkpoint')
def lowerCAmelCase ( self) -> Tuple:
'''simple docstring'''
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone')
def lowerCAmelCase ( self) -> str:
'''simple docstring'''
pass
@unittest.skip('model weights aren\'t tied in TimmBackbone.')
def lowerCAmelCase ( self) -> Optional[Any]:
'''simple docstring'''
pass
@unittest.skip('model weights aren\'t tied in TimmBackbone.')
def lowerCAmelCase ( self) -> int:
'''simple docstring'''
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone')
def lowerCAmelCase ( self) -> int:
'''simple docstring'''
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone')
def lowerCAmelCase ( self) -> str:
'''simple docstring'''
pass
@unittest.skip('TimmBackbone doesn\'t have hidden size info in its configuration.')
def lowerCAmelCase ( self) -> Dict:
'''simple docstring'''
pass
@unittest.skip('TimmBackbone doesn\'t support output_attentions.')
def lowerCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
pass
@unittest.skip('Safetensors is not supported by timm.')
def lowerCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.')
def lowerCAmelCase ( self) -> Optional[Any]:
'''simple docstring'''
pass
def lowerCAmelCase ( self) -> Any:
'''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1] , expected_arg_names)
def lowerCAmelCase ( self) -> int:
'''simple docstring'''
_UpperCAmelCase : Tuple =self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase : List[Any] =True
_UpperCAmelCase : Union[str, Any] =self.has_attentions
# no need to test all models as different heads yield the same functionality
_UpperCAmelCase : Union[str, Any] =self.all_model_classes[0]
_UpperCAmelCase : int =model_class(snake_case)
model.to(snake_case)
_UpperCAmelCase : List[str] =self._prepare_for_class(snake_case , snake_case)
_UpperCAmelCase : Optional[Any] =model(**snake_case)
_UpperCAmelCase : Optional[int] =outputs[0][-1]
# Encoder-/Decoder-only models
_UpperCAmelCase : Any =outputs.hidden_states[0]
hidden_states.retain_grad()
if self.has_attentions:
_UpperCAmelCase : Dict =outputs.attentions[0]
attentions.retain_grad()
output.flatten()[0].backward(retain_graph=snake_case)
self.assertIsNotNone(hidden_states.grad)
if self.has_attentions:
self.assertIsNotNone(attentions.grad)
def lowerCAmelCase ( self) -> Optional[Any]:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase : Dict =model_class(snake_case)
model.to(snake_case)
model.eval()
_UpperCAmelCase : Optional[Any] =model(**snake_case)
self.assertEqual(len(result.feature_maps) , len(config.out_indices))
self.assertEqual(len(model.channels) , len(config.out_indices))
# Check output of last stage is taken if out_features=None, out_indices=None
_UpperCAmelCase : str =copy.deepcopy(snake_case)
_UpperCAmelCase : List[Any] =None
_UpperCAmelCase : Any =model_class(snake_case)
model.to(snake_case)
model.eval()
_UpperCAmelCase : int =model(**snake_case)
self.assertEqual(len(result.feature_maps) , 1)
self.assertEqual(len(model.channels) , 1)
# Check backbone can be initialized with fresh weights
_UpperCAmelCase : str =copy.deepcopy(snake_case)
_UpperCAmelCase : Any =False
_UpperCAmelCase : Tuple =model_class(snake_case)
model.to(snake_case)
model.eval()
_UpperCAmelCase : Optional[Any] =model(**snake_case)
| 351 |
'''simple docstring'''
from typing import Any
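# Viterbi algorithm over a hidden Markov model: for each observation we keep, per state,
# the probability of the best path ending in that state plus a back-pointer, then walk
# the pointers backwards to recover the most likely state sequence.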
def viterbi ( observations_space : list , states_space : list , initial_probabilities : dict , transition_probabilities : dict , emission_probabilities : dict , ):
    '''simple docstring'''
    _validation(
        observations_space , states_space , initial_probabilities , transition_probabilities , emission_probabilities , )
# Creates data structures and fill initial step
    probabilities: dict = {}
    pointers: dict = {}
    for state in states_space:
        observation = observations_space[0]
        probabilities[(state, observation)] = (
            initial_probabilities[state] * emission_probabilities[state][observation]
        )
        pointers[(state, observation)] = None
# Fills the data structure with the probabilities of
# different transitions and pointers to previous states
    for o in range(1 , len(observations_space ) ):
        observation = observations_space[o]
        prior_observation = observations_space[o - 1]
        for state in states_space:
            # Calculates the argmax for probability function
            arg_max = ''
            max_probability = -1
            for k_state in states_space:
                probability = (
                    probabilities[(k_state, prior_observation)]
                    * transition_probabilities[k_state][state]
                    * emission_probabilities[state][observation]
                )
                if probability > max_probability:
                    max_probability = probability
                    arg_max = k_state
            # Update probabilities and pointers dicts
            probabilities[(state, observation)] = (
                probabilities[(arg_max, prior_observation)]
                * transition_probabilities[arg_max][state]
                * emission_probabilities[state][observation]
            )
            pointers[(state, observation)] = arg_max
# The final observation
    final_observation = observations_space[len(observations_space ) - 1]
    # argmax for given final observation
    arg_max = ''
    max_probability = -1
    for k_state in states_space:
        probability = probabilities[(k_state, final_observation)]
        if probability > max_probability:
            max_probability = probability
            arg_max = k_state
    last_state = arg_max
    # Process pointers backwards
    previous = last_state
    result = []
    for o in range(len(observations_space ) - 1 , -1 , -1 ):
        result.append(previous )
        previous = pointers[previous, observations_space[o]]
result.reverse()
return result
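# Hypothetical usage sketch (argument values illustrative, not part of this module):
#   path = viterbi(["normal", "cold", "dizzy"], ["Healthy", "Fever"],
#                  initial_probabilities, transition_probabilities, emission_probabilities)
# would return the most probable hidden-state sequence, e.g. ["Healthy", "Healthy", "Fever"].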
def _validation ( observations_space : Any , states_space : Any , initial_probabilities : Any , transition_probabilities : Any , emission_probabilities : Any , ):
    '''simple docstring'''
    _validate_not_empty(
        observations_space , states_space , initial_probabilities , transition_probabilities , emission_probabilities , )
    _validate_lists(observations_space , states_space )
    _validate_dicts(
        initial_probabilities , transition_probabilities , emission_probabilities )
def _validate_not_empty ( observations_space : Any , states_space : Any , initial_probabilities : Any , transition_probabilities : Any , emission_probabilities : Any , ):
'''simple docstring'''
if not all(
[
observations_space,
states_space,
initial_probabilities,
transition_probabilities,
emission_probabilities,
] ):
raise ValueError('There\'s an empty parameter' )
def _validate_lists ( observations_space : Any , states_space : Any ):
    '''simple docstring'''
    _validate_list(observations_space , 'observations_space' )
    _validate_list(states_space , 'states_space' )
def _validate_list ( _object : Any , var_name : str ):
    '''simple docstring'''
    if not isinstance(_object , list ):
        error_message = f"{var_name} must be a list"
        raise ValueError(error_message )
    else:
        for x in _object:
            if not isinstance(x , str ):
                error_message = f"{var_name} must be a list of strings"
                raise ValueError(error_message )
def _validate_dicts ( initial_probabilities : Any , transition_probabilities : Any , emission_probabilities : Any , ):
    '''simple docstring'''
    _validate_dict(initial_probabilities , 'initial_probabilities' , float )
    _validate_nested_dict(transition_probabilities , 'transition_probabilities' )
    _validate_nested_dict(emission_probabilities , 'emission_probabilities' )
def _validate_nested_dict ( _object : Any , var_name : str ):
    '''simple docstring'''
    _validate_dict(_object , var_name , dict )
    for x in _object.values():
        _validate_dict(x , var_name , float , True )
def _validate_dict ( _object : Any , var_name : str , value_type : type , nested : bool = False ):
    '''simple docstring'''
    if not isinstance(_object , dict ):
        error_message = f"{var_name} must be a dict"
        raise ValueError(error_message )
    if not all(isinstance(x , str ) for x in _object ):
        error_message = f"{var_name} all keys must be strings"
        raise ValueError(error_message )
    if not all(isinstance(x , value_type ) for x in _object.values() ):
        nested_text = 'nested dictionary ' if nested else ''
        error_message = f"{var_name} {nested_text}all values must be {value_type.__name__}"
        raise ValueError(error_message )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 242 | 0 |
"""simple docstring"""
import json
import logging
import os
import re
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import datasets
import numpy as np
import torch
import torchaudio
from packaging import version
from torch import nn
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForCTC,
    Wav2Vec2Processor,
is_apex_available,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse("""1.6"""):
_a = True
from torch.cuda.amp import autocast
logger = logging.getLogger(__name__)
def list_field ( default=None, metadata=None ) -> List[str]:
    """simple docstring"""
    return field(default_factory=lambda: default, metadata=metadata )
@dataclass
class _UpperCAmelCase:
lowercase__ = field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
lowercase__ = field(
default=lowerCamelCase , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
lowercase__ = field(
default=lowerCamelCase , metadata={'help': 'Whether to freeze the feature extractor layers of the model.'} )
lowercase__ = field(
default=0.1 , metadata={'help': 'The dropout ratio for the attention probabilities.'} )
lowercase__ = field(
default=0.1 , metadata={'help': 'The dropout ratio for activations inside the fully connected layer.'} )
lowercase__ = field(
default=0.1 , metadata={
'help': 'The dropout probabilitiy for all fully connected layers in the embeddings, encoder, and pooler.'
} , )
lowercase__ = field(
default=0.1 , metadata={'help': 'The dropout probabilitiy for all 1D convolutional layers in feature extractor.'} , )
lowercase__ = field(
default=0.05 , metadata={
'help': (
'Propability of each feature vector along the time axis to be chosen as the start of the vector'
'span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature'
'vectors will be masked along the time axis. This is only relevant if ``apply_spec_augment is True``.'
)
} , )
lowercase__ = field(default=0.0 , metadata={'help': 'The LayerDrop probability.'} )
@dataclass
class _UpperCAmelCase:
lowercase__ = field(
default=lowerCamelCase , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} )
lowercase__ = field(
default='train+validation' , metadata={
'help': 'The name of the training data set split to use (via the datasets library). Defaults to \'train\''
} , )
lowercase__ = field(
default=lowerCamelCase , metadata={'help': 'Overwrite the cached preprocessed datasets or not.'} )
lowercase__ = field(
default=lowerCamelCase , metadata={'help': 'The number of processes to use for the preprocessing.'} , )
lowercase__ = field(
default=lowerCamelCase , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
} , )
lowercase__ = field(
default=lowerCamelCase , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of validation examples to this '
'value if set.'
)
} , )
lowercase__ = list_field(
default=[',', '?', '.', '!', '-', ';', ':', '""', '%', '\'', '"', '�'] , metadata={'help': 'A list of characters to remove from the transcripts.'} , )
@dataclass
class _UpperCAmelCase:
lowercase__ = 42
lowercase__ = True
lowercase__ = None
lowercase__ = None
lowercase__ = None
lowercase__ = None
    def __call__( self , features) -> Dict[str, torch.Tensor]:
        '''simple docstring'''
        # split inputs and labels since they have to be of different lenghts and need
        # different padding methods
        input_features = [{'''input_values''': feature['''input_values''']} for feature in features]
        label_features = [{'''input_ids''': feature['''labels''']} for feature in features]
        batch = self.processor.pad(
            input_features , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='''pt''' , )
        labels_batch = self.processor.pad(
            labels=label_features , padding=self.padding , max_length=self.max_length_labels , pad_to_multiple_of=self.pad_to_multiple_of_labels , return_tensors='''pt''' , )
        # replace padding with -100 to ignore loss correctly
        labels = labels_batch['''input_ids'''].masked_fill(labels_batch.attention_mask.ne(1) , -1_00)
        batch['''labels'''] = labels
        return batch
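# Trainer subclass: training_step reduces the CTC loss correctly under DataParallel and
# routes the backward pass through AMP, Apex, or DeepSpeed depending on configuration.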
class _UpperCAmelCase( lowerCamelCase ):
    def training_step( self , model , inputs) -> torch.Tensor:
        '''simple docstring'''
        model.train()
        inputs = self._prepare_inputs(inputs)
        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model , inputs)
        else:
            loss = self.compute_loss(model , inputs)
        if self.args.n_gpu > 1:
            if model.module.config.ctc_loss_reduction == "mean":
                loss = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                loss = loss.sum() / (inputs['''labels'''] >= 0).sum()
            else:
                raise ValueError(F'''{model.config.ctc_loss_reduction} is not valid. Choose one of [\'mean\', \'sum\']''')
        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps
        if self.use_amp:
            self.scaler.scale(loss).backward()
        elif self.use_apex:
            with amp.scale_loss(loss , self.optimizer) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(loss)
        else:
            loss.backward()
        return loss.detach()
def main( ) -> List[str]:
"""simple docstring"""
_UpperCamelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
_UpperCamelCase = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
_UpperCamelCase = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None:
logger.info(
F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', handlers=[logging.StreamHandler(sys.stdout )], )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
        + F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}''' )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info('''Training/evaluation parameters %s''', __snake_case )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets:
_UpperCamelCase = datasets.load_dataset(
'''common_voice''', data_args.dataset_config_name, split=data_args.train_split_name )
_UpperCamelCase = datasets.load_dataset('''common_voice''', data_args.dataset_config_name, split='''test''' )
# Create and save tokenizer
_UpperCamelCase = F'''[{"".join(data_args.chars_to_ignore )}]'''
def remove_special_characters(__snake_case ):
_UpperCamelCase = re.sub(__snake_case, '''''', batch['''sentence'''] ).lower() + ''' '''
return batch
_UpperCamelCase = train_dataset.map(__snake_case, remove_columns=['''sentence'''] )
_UpperCamelCase = eval_dataset.map(__snake_case, remove_columns=['''sentence'''] )
def extract_all_chars(__snake_case ):
_UpperCamelCase = ''' '''.join(batch['''text'''] )
_UpperCamelCase = list(set(__snake_case ) )
return {"vocab": [vocab], "all_text": [all_text]}
_UpperCamelCase = train_dataset.map(
__snake_case, batched=__snake_case, batch_size=-1, keep_in_memory=__snake_case, remove_columns=train_dataset.column_names, )
_UpperCamelCase = train_dataset.map(
__snake_case, batched=__snake_case, batch_size=-1, keep_in_memory=__snake_case, remove_columns=eval_dataset.column_names, )
_UpperCamelCase = list(set(vocab_train['''vocab'''][0] ) | set(vocab_test['''vocab'''][0] ) )
_UpperCamelCase = {v: k for k, v in enumerate(__snake_case )}
_UpperCamelCase = vocab_dict[''' ''']
del vocab_dict[" "]
_UpperCamelCase = len(__snake_case )
_UpperCamelCase = len(__snake_case )
with open('''vocab.json''', '''w''' ) as vocab_file:
json.dump(__snake_case, __snake_case )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    tokenizer = Wav2Vec2CTCTokenizer(
        '''vocab.json''', unk_token='''[UNK]''', pad_token='''[PAD]''', word_delimiter_token='''|''', )
    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1, sampling_rate=1_60_00, padding_value=0.0, do_normalize=True, return_attention_mask=True )
    processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer )
    model = Wav2Vec2ForCTC.from_pretrained(
        model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        activation_dropout=model_args.activation_dropout,
        attention_dropout=model_args.attention_dropout,
        hidden_dropout=model_args.hidden_dropout,
        feat_proj_dropout=model_args.feat_proj_dropout,
        mask_time_prob=model_args.mask_time_prob,
        gradient_checkpointing=training_args.gradient_checkpointing,
        layerdrop=model_args.layerdrop,
        ctc_loss_reduction='''mean''',
        pad_token_id=processor.tokenizer.pad_token_id,
        vocab_size=len(processor.tokenizer ),
    )
if data_args.max_train_samples is not None:
_UpperCamelCase = min(len(__snake_case ), data_args.max_train_samples )
_UpperCamelCase = train_dataset.select(range(__snake_case ) )
if data_args.max_val_samples is not None:
_UpperCamelCase = eval_dataset.select(range(data_args.max_val_samples ) )
_UpperCamelCase = torchaudio.transforms.Resample(4_80_00, 1_60_00 )
# Preprocessing the datasets.
# We need to read the aduio files as arrays and tokenize the targets.
def speech_file_to_array_fn(__snake_case ):
_UpperCamelCase , _UpperCamelCase = torchaudio.load(batch['''path'''] )
_UpperCamelCase = resampler(__snake_case ).squeeze().numpy()
_UpperCamelCase = 1_60_00
_UpperCamelCase = batch['''text''']
return batch
_UpperCamelCase = train_dataset.map(
__snake_case, remove_columns=train_dataset.column_names, num_proc=data_args.preprocessing_num_workers, )
_UpperCamelCase = eval_dataset.map(
__snake_case, remove_columns=eval_dataset.column_names, num_proc=data_args.preprocessing_num_workers, )
def prepare_dataset(__snake_case ):
# check that all files have the correct sampling rate
assert (
len(set(batch['''sampling_rate'''] ) ) == 1
), F'''Make sure all inputs have the same sampling rate of {processor.feature_extractor.sampling_rate}.'''
_UpperCamelCase = processor(
audio=batch['''speech'''], text=batch['''target_text'''], sampling_rate=batch['''sampling_rate'''][0] )
batch.update(__snake_case )
return batch
_UpperCamelCase = train_dataset.map(
__snake_case, remove_columns=train_dataset.column_names, batch_size=training_args.per_device_train_batch_size, batched=__snake_case, num_proc=data_args.preprocessing_num_workers, )
_UpperCamelCase = eval_dataset.map(
__snake_case, remove_columns=eval_dataset.column_names, batch_size=training_args.per_device_train_batch_size, batched=__snake_case, num_proc=data_args.preprocessing_num_workers, )
# Metric
_UpperCamelCase = datasets.load_metric('''wer''' )
def compute_metrics(__snake_case ):
_UpperCamelCase = pred.predictions
_UpperCamelCase = np.argmax(__snake_case, axis=-1 )
_UpperCamelCase = processor.tokenizer.pad_token_id
_UpperCamelCase = processor.batch_decode(__snake_case )
# we do not want to group tokens when computing the metrics
_UpperCamelCase = processor.batch_decode(pred.label_ids, group_tokens=__snake_case )
_UpperCamelCase = wer_metric.compute(predictions=__snake_case, references=__snake_case )
return {"wer": wer}
if model_args.freeze_feature_extractor:
model.freeze_feature_extractor()
# Data collator
_UpperCamelCase = DataCollatorCTCWithPadding(processor=__snake_case, padding=__snake_case )
# Initialize our Trainer
    trainer = CTCTrainer(
model=__snake_case, data_collator=__snake_case, args=__snake_case, compute_metrics=__snake_case, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, tokenizer=processor.feature_extractor, )
# Training
if training_args.do_train:
if last_checkpoint is not None:
_UpperCamelCase = last_checkpoint
elif os.path.isdir(model_args.model_name_or_path ):
_UpperCamelCase = model_args.model_name_or_path
else:
_UpperCamelCase = None
# Save the feature_extractor and the tokenizer
if is_main_process(training_args.local_rank ):
processor.save_pretrained(training_args.output_dir )
_UpperCamelCase = trainer.train(resume_from_checkpoint=__snake_case )
trainer.save_model()
_UpperCamelCase = train_result.metrics
_UpperCamelCase = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(__snake_case )
)
_UpperCamelCase = min(__snake_case, len(__snake_case ) )
trainer.log_metrics('''train''', __snake_case )
trainer.save_metrics('''train''', __snake_case )
trainer.save_state()
# Evaluation
_UpperCamelCase = {}
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
_UpperCamelCase = trainer.evaluate()
_UpperCamelCase = data_args.max_val_samples if data_args.max_val_samples is not None else len(__snake_case )
_UpperCamelCase = min(__snake_case, len(__snake_case ) )
trainer.log_metrics('''eval''', __snake_case )
trainer.save_metrics('''eval''', __snake_case )
return results
if __name__ == "__main__":
main()
| 194 |
"""simple docstring"""
import argparse
import json
import os
import torch
from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
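# Converts a research LUKE checkpoint to the transformers format, extends the vocabulary
# with <ent>/<ent2> markers, and sanity-checks the word and entity hidden states.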
@torch.no_grad()
def convert_luke_checkpoint ( checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size ) -> str:
"""simple docstring"""
with open(__snake_case ) as metadata_file:
_UpperCamelCase = json.load(__snake_case )
_UpperCamelCase = LukeConfig(use_entity_aware_attention=__snake_case, **metadata['''model_config'''] )
# Load in the weights from the checkpoint_path
_UpperCamelCase = torch.load(__snake_case, map_location='''cpu''' )
# Load the entity vocab file
_UpperCamelCase = load_entity_vocab(__snake_case )
_UpperCamelCase = RobertaTokenizer.from_pretrained(metadata['''model_config''']['''bert_model_name'''] )
# Add special tokens to the token vocabulary for downstream tasks
_UpperCamelCase = AddedToken('''<ent>''', lstrip=__snake_case, rstrip=__snake_case )
_UpperCamelCase = AddedToken('''<ent2>''', lstrip=__snake_case, rstrip=__snake_case )
tokenizer.add_special_tokens({'''additional_special_tokens''': [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(F'''Saving tokenizer to {pytorch_dump_folder_path}''' )
tokenizer.save_pretrained(__snake_case )
with open(os.path.join(__snake_case, LukeTokenizer.vocab_files_names['''entity_vocab_file'''] ), '''w''' ) as f:
json.dump(__snake_case, __snake_case )
_UpperCamelCase = LukeTokenizer.from_pretrained(__snake_case )
# Initialize the embeddings of the special tokens
_UpperCamelCase = state_dict['''embeddings.word_embeddings.weight''']
_UpperCamelCase = word_emb[tokenizer.convert_tokens_to_ids(['''@'''] )[0]].unsqueeze(0 )
_UpperCamelCase = word_emb[tokenizer.convert_tokens_to_ids(['''#'''] )[0]].unsqueeze(0 )
_UpperCamelCase = torch.cat([word_emb, ent_emb, enta_emb] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
_UpperCamelCase = F'''encoder.layer.{layer_index}.attention.self.'''
_UpperCamelCase = state_dict[prefix + matrix_name]
_UpperCamelCase = state_dict[prefix + matrix_name]
_UpperCamelCase = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
_UpperCamelCase = state_dict['''entity_embeddings.entity_embeddings.weight''']
_UpperCamelCase = entity_emb[entity_vocab['''[MASK]''']]
_UpperCamelCase = LukeModel(config=__snake_case ).eval()
_UpperCamelCase , _UpperCamelCase = model.load_state_dict(__snake_case, strict=__snake_case )
if not (len(__snake_case ) == 1 and missing_keys[0] == "embeddings.position_ids"):
raise ValueError(F'''Missing keys {", ".join(__snake_case )}. Expected only missing embeddings.position_ids''' )
if not (all(key.startswith('''entity_predictions''' ) or key.startswith('''lm_head''' ) for key in unexpected_keys )):
raise ValueError(
'''Unexpected keys'''
F''' {", ".join([key for key in unexpected_keys if not (key.startswith("entity_predictions" ) or key.startswith("lm_head" ))] )}''' )
# Check outputs
_UpperCamelCase = LukeTokenizer.from_pretrained(__snake_case, task='''entity_classification''' )
_UpperCamelCase = (
'''Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the'''
''' new world number one avoid a humiliating second- round exit at Wimbledon .'''
)
_UpperCamelCase = (39, 42)
_UpperCamelCase = tokenizer(__snake_case, entity_spans=[span], add_prefix_space=__snake_case, return_tensors='''pt''' )
_UpperCamelCase = model(**__snake_case )
# Verify word hidden states
if model_size == "large":
_UpperCamelCase = torch.Size((1, 42, 10_24) )
_UpperCamelCase = torch.tensor(
[[0.0133, 0.0865, 0.0095], [0.3093, -0.2576, -0.7418], [-0.1720, -0.2117, -0.2869]] )
else: # base
_UpperCamelCase = torch.Size((1, 42, 7_68) )
_UpperCamelCase = torch.tensor([[0.0037, 0.1368, -0.0091], [0.1099, 0.3329, -0.1095], [0.0765, 0.5335, 0.1179]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
F'''Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}''' )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3], __snake_case, atol=1e-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
_UpperCamelCase = torch.Size((1, 1, 10_24) )
_UpperCamelCase = torch.tensor([[0.0466, -0.0106, -0.0179]] )
else: # base
_UpperCamelCase = torch.Size((1, 1, 7_68) )
_UpperCamelCase = torch.tensor([[0.1457, 0.1044, 0.0174]] )
    if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
F'''Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'''
F''' {expected_shape}''' )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], __snake_case, atol=1e-4 ):
raise ValueError
# Finally, save our PyTorch model and tokenizer
print('''Saving PyTorch model to {}'''.format(__snake_case ) )
model.save_pretrained(__snake_case )
def load_entity_vocab ( entity_vocab_path ) -> Union[str, Any]:
    """simple docstring"""
    entity_vocab = {}
    with open(entity_vocab_path, '''r''', encoding='''utf-8''' ) as f:
        for index, line in enumerate(f ):
            title, _ = line.rstrip().split('''\t''' )
            entity_vocab[title] = index
    return entity_vocab
if __name__ == "__main__":
_a = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--checkpoint_path""", type=str, help="""Path to a pytorch_model.bin file.""")
parser.add_argument(
"""--metadata_path""", default=None, type=str, help="""Path to a metadata.json file, defining the configuration."""
)
parser.add_argument(
"""--entity_vocab_path""",
default=None,
type=str,
help="""Path to an entity_vocab.tsv file, containing the entity vocabulary.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to where to dump the output PyTorch model."""
)
parser.add_argument(
"""--model_size""", default="""base""", type=str, choices=["""base""", """large"""], help="""Size of the model to be converted."""
)
_a = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 194 | 1 |
'''simple docstring'''
from collections.abc import Sequence
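# Kadane's algorithm: single-pass O(n) maximum-subarray sum. With allow_empty_subarrays=True
# the running sum may reset to 0 (the empty subarray counts); otherwise it restarts from the
# current element, so at least one element is always taken.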
def max_subarray_sum ( arr : Sequence[float] , allow_empty_subarrays : bool = False ) -> float:
    """simple docstring"""
    if not arr:
        return 0
    max_sum = 0 if allow_empty_subarrays else float("""-inf""" )
    curr_sum = 0.0
    for num in arr:
        curr_sum = max(0 if allow_empty_subarrays else num , curr_sum + num )
        max_sum = max(max_sum , curr_sum )
    return max_sum
if __name__ == "__main__":
from doctest import testmod
testmod()
    nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
    print(F'''{max_subarray_sum(nums) = }''')
| 365 |
'''simple docstring'''
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
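# The tester below builds a tiny ConvBERT config and shared inputs for every task head
# (masked LM, sequence/token classification, multiple choice, QA).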
class __snake_case:
'''simple docstring'''
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.0_2 , num_labels=3 , num_choices=4 , scope=None , ) -> Dict:
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 384
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = """gelu"""
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.0_2
        self.num_labels = 3
        self.num_choices = 4
        self.embedding_size = 128
        self.head_ratio = 2
        self.conv_kernel_size = 9
        self.num_groups = 1
        self.scope = None
    def prepare_config_and_inputs( self ) -> Optional[int]:
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = ConvBertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=True , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFConvBertForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFConvBertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFConvBertForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFConvBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFConvBertModel,
            TFConvBertForMaskedLM,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFConvBertModel,
            "fill-mask": TFConvBertForMaskedLM,
            "question-answering": TFConvBertForQuestionAnswering,
            "text-classification": TFConvBertForSequenceClassification,
            "token-classification": TFConvBertForTokenClassification,
            "zero-shot": TFConvBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFConvBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
    @slow
    def test_saved_model_creation_extended(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = True

        if hasattr(config, "use_cache"):
            config.use_cache = True

        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        for model_class in self.all_model_classes:
            class_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            model = model_class(config)
            num_out = len(model(class_inputs_dict))

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=True)
                saved_model_dir = os.path.join(tmpdirname, "saved_model", "1")
                model = tf.keras.models.load_model(saved_model_dir)
                outputs = model(class_inputs_dict)

                if self.is_encoder_decoder:
                    output_hidden_states = outputs["encoder_hidden_states"]
                    output_attentions = outputs["encoder_attentions"]
                else:
                    output_hidden_states = outputs["hidden_states"]
                    output_attentions = outputs["attentions"]

                self.assertEqual(len(outputs), num_out)

                expected_num_layers = getattr(
                    self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
                )

                self.assertEqual(len(output_hidden_states), expected_num_layers)
                self.assertListEqual(
                    list(output_hidden_states[0].shape[-2:]),
                    [self.model_tester.seq_length, self.model_tester.hidden_size],
                )

                self.assertEqual(len(output_attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(output_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
                )
    @slow
    def test_model_from_pretrained(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        self.assertIsNotNone(model)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", self.model_tester.seq_length)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        decoder_key_length = getattr(self.model_tester, "key_length", decoder_seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        def check_decoder_attentions_output(outputs):
            out_len = len(outputs)
            self.assertEqual(out_len % 2, 0)
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length],
            )

        def check_encoder_attentions_output(outputs):
            attentions = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
            )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)
@require_tf
class TFConvBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [-0.03475493, -0.4686034, -0.30638832],
                    [0.22637248, -0.26988646, -0.7423424],
                    [0.10324868, -0.45013508, -0.58280784],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4) | 187 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_torch_available,
is_vision_available,
)
_import_structure = {'configuration_beit': ['BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BeitConfig', 'BeitOnnxConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_beit'] = ['BeitFeatureExtractor']
    _import_structure['image_processing_beit'] = ['BeitImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_beit'] = [
'BEIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BeitForImageClassification',
'BeitForMaskedImageModeling',
'BeitForSemanticSegmentation',
'BeitModel',
'BeitPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_beit'] = [
'FlaxBeitForImageClassification',
'FlaxBeitForMaskedImageModeling',
'FlaxBeitModel',
'FlaxBeitPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_beit import BeitFeatureExtractor
from .image_processing_beit import BeitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_beit import (
BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
BeitPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_beit import (
FlaxBeitForImageClassification,
FlaxBeitForMaskedImageModeling,
FlaxBeitModel,
FlaxBeitPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
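
# What the lazy-module setup above buys: importing this package stays cheap
# because the torch/flax submodules are only imported once one of their symbols
# is actually accessed. Below is a simplified, self-contained illustration of
# the idea; it is a sketch, not the actual transformers `_LazyModule`.
import importlib
import types


class _TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map every exported symbol back to the submodule that defines it.
        self._symbol_to_module = {sym: mod for mod, syms in import_structure.items() for sym in syms}

    def __getattr__(self, attr):
        submodule = self._symbol_to_module.get(attr)
        if submodule is None:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        # Import the defining submodule lazily on first access, then delegate.
        module = importlib.import_module("." + submodule, self.__name__)
        return getattr(module, attr)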
| 84 |
def excel_title_to_column(column_title: str) -> int:
    """
    Given an uppercase string that represents a column title in an Excel sheet,
    return its corresponding 1-based column number.

    >>> excel_title_to_column("A")
    1
    >>> excel_title_to_column("Z")
    26
    >>> excel_title_to_column("AB")
    28
    """
    assert column_title.isupper()
    answer = 0
    index = len(column_title) - 1
    power = 0

    while index >= 0:
        value = (ord(column_title[index]) - 64) * pow(26, power)
        answer += value
        power += 1
        index -= 1

    return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
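    # Worked example: "AB" -> 1 * 26**1 + 2 * 26**0 = 28.
    assert excel_title_to_column("AB") == 28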
| 305 | 0 |
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def convert_command_factory(args: Namespace):
    """Factory function used to convert a TensorFlow 1.0 checkpoint into a PyTorch checkpoint."""
    return ConvertCommand(
        args.model_type, args.tf_checkpoint, args.pytorch_dump_output, args.config, args.finetuning_task_name
    )


IMPORT_ERROR_MESSAGE = """
transformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires
TensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.
"""


class ConvertCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        """Register this command and its arguments with the root transformers-cli parser."""
        train_parser = parser.add_parser(
            "convert",
            help="CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.",
        )
        train_parser.add_argument("--model_type", type=str, required=True, help="Model's type.")
        train_parser.add_argument(
            "--tf_checkpoint", type=str, required=True, help="TensorFlow checkpoint path or folder."
        )
        train_parser.add_argument(
            "--pytorch_dump_output", type=str, required=True, help="Path to the PyTorch saved model output."
        )
        train_parser.add_argument("--config", type=str, default="", help="Configuration file path or folder.")
        train_parser.add_argument(
            "--finetuning_task_name",
            type=str,
            default=None,
            help="Optional fine-tuning task name if the TF model was a finetuned model.",
        )
        train_parser.set_defaults(func=convert_command_factory)
    def __init__(
        self,
        model_type: str,
        tf_checkpoint: str,
        pytorch_dump_output: str,
        config: str,
        finetuning_task_name: str,
        *args,
    ):
        self._logger = logging.get_logger("transformers-cli/converting")

        self._logger.info(f"Loading model {model_type}")
        self._model_type = model_type
        self._tf_checkpoint = tf_checkpoint
        self._pytorch_dump_output = pytorch_dump_output
        self._config = config
        self._finetuning_task_name = finetuning_task_name

    def run(self):
        if self._model_type == "albert":
try:
from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "bert":
try:
from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "funnel":
try:
from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "t5":
try:
from ..models.ta.convert_ta_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "gpt":
from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
convert_openai_checkpoint_to_pytorch,
)
convert_openai_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "transfo_xl":
try:
from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
convert_transfo_xl_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
if "ckpt" in self._tf_checkpoint.lower():
UpperCAmelCase = self._tf_checkpoint
UpperCAmelCase = ''''''
else:
UpperCAmelCase = self._tf_checkpoint
UpperCAmelCase = ''''''
convert_transfo_xl_checkpoint_to_pytorch(
__lowercase , self._config , self._pytorch_dump_output , __lowercase )
elif self._model_type == "gpt2":
try:
from ..models.gpta.convert_gpta_original_tf_checkpoint_to_pytorch import (
convert_gpta_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_gpta_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "xlnet":
try:
from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
convert_xlnet_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_xlnet_checkpoint_to_pytorch(
self._tf_checkpoint , self._config , self._pytorch_dump_output , self._finetuning_task_name )
elif self._model_type == "xlm":
from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
convert_xlm_checkpoint_to_pytorch,
)
convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "lxmert":
from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
convert_lxmert_checkpoint_to_pytorch,
)
convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "rembert":
from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
convert_rembert_tf_checkpoint_to_pytorch,
)
convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
else:
raise ValueError(
'''--model_type should be selected in the list [bert, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert]''' )
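
# Typical invocation of the command registered above (paths are placeholders):
#
#   transformers-cli convert --model_type bert \
#       --tf_checkpoint ./bert_model.ckpt \
#       --config ./bert_config.json \
#       --pytorch_dump_output ./pytorch_model.bin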
| 359 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ["NllbTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ["NllbTokenizerFast"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 152 | 0 |
"""Fast polynomial multiplication using a radix-2 fast Fourier transform."""
import mpmath  # for roots of unity
import numpy as np


class FFT:
    """Multiplies two polynomials (given as coefficient lists) via the FFT."""

    def __init__(self, poly_a=None, poly_b=None):
        # Input as list
        self.polyA = list(poly_a or [0])[:]
        self.polyB = list(poly_b or [0])[:]

        # Remove leading zero coefficients
        while self.polyA[-1] == 0:
            self.polyA.pop()
        self.len_A = len(self.polyA)

        while self.polyB[-1] == 0:
            self.polyB.pop()
        self.len_B = len(self.polyB)

        # Add 0 to make lengths equal a power of 2
        self.c_max_length = int(2 ** np.ceil(np.log2(len(self.polyA) + len(self.polyB) - 1)))

        while len(self.polyA) < self.c_max_length:
            self.polyA.append(0)
        while len(self.polyB) < self.c_max_length:
            self.polyB.append(0)

        # A complex root used for the fourier transform
        self.root = complex(mpmath.root(x=1, n=self.c_max_length, k=1))

        # The product
        self.product = self.__multiply()

    def __dft(self, which):
        dft = [[x] for x in self.polyA] if which == "A" else [[x] for x in self.polyB]
        # Corner case
        if len(dft) <= 1:
            return dft[0]

        next_ncol = self.c_max_length // 2
        while next_ncol > 0:
            new_dft = [[] for i in range(next_ncol)]
            root = self.root**next_ncol

            # First half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j])
                current_root *= root
            # Second half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j])
                current_root *= root
            # Update
            dft = new_dft
            next_ncol = next_ncol // 2
        return dft[0]

    def __multiply(self):
        dft_a = self.__dft("A")
        dft_b = self.__dft("B")
        inverce_c = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length)]]
        del dft_a
        del dft_b

        # Corner Case
        if len(inverce_c[0]) <= 1:
            return inverce_c[0]
        # Inverse DFT
        next_ncol = 2
        while next_ncol <= self.c_max_length:
            new_inverse_c = [[] for i in range(next_ncol)]
            root = self.root ** (next_ncol // 2)
            current_root = 1
            # First half of next step
            for j in range(self.c_max_length // next_ncol):
                for i in range(next_ncol // 2):
                    # Even positions
                    new_inverse_c[i].append(
                        (inverce_c[i][j] + inverce_c[i][j + self.c_max_length // next_ncol]) / 2
                    )
                    # Odd positions
                    new_inverse_c[i + next_ncol // 2].append(
                        (inverce_c[i][j] - inverce_c[i][j + self.c_max_length // next_ncol]) / (2 * current_root)
                    )
                current_root *= root
            # Update
            inverce_c = new_inverse_c
            next_ncol *= 2
        # Unpack
        inverce_c = [round(x[0].real, 8) + round(x[0].imag, 8) * 1j for x in inverce_c]

        # Remove leading 0's
        while inverce_c[-1] == 0:
            inverce_c.pop()
        return inverce_c

    # Overwrite __str__ for print(); shows A, B and A*B
    def __str__(self):
        a = "A = " + " + ".join(f"{coef}*x^{i}" for i, coef in enumerate(self.polyA[: self.len_A]))
        b = "B = " + " + ".join(f"{coef}*x^{i}" for i, coef in enumerate(self.polyB[: self.len_B]))
        c = "A*B = " + " + ".join(f"{coef}*x^{i}" for i, coef in enumerate(self.product))

        return f"{a}\n{b}\n{c}"


# Unit tests
if __name__ == "__main__":
    import doctest

    doctest.testmod()
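    # Example: (1 + 2x) * (1 + 3x) = 1 + 5x + 6x^2, so the recovered coefficient
    # list is [1, 5, 6] (stored as complex numbers with zero imaginary part).
    print(FFT(poly_a=[1, 2], poly_b=[1, 3]))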
| 58 |
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

model_classes = {
    "b0": efficientnet.EfficientNetB0,
    "b1": efficientnet.EfficientNetB1,
    "b2": efficientnet.EfficientNetB2,
    "b3": efficientnet.EfficientNetB3,
    "b4": efficientnet.EfficientNetB4,
    "b5": efficientnet.EfficientNetB5,
    "b6": efficientnet.EfficientNetB6,
    "b7": efficientnet.EfficientNetB7,
}

CONFIG_MAP = {
    "b0": {
        "hidden_dim": 1280,
        "width_coef": 1.0,
        "depth_coef": 1.0,
        "image_size": 224,
        "dropout_rate": 0.2,
        "dw_padding": [],
    },
    "b1": {
        "hidden_dim": 1280,
        "width_coef": 1.0,
        "depth_coef": 1.1,
        "image_size": 240,
        "dropout_rate": 0.2,
        "dw_padding": [16],
    },
    "b2": {
        "hidden_dim": 1408,
        "width_coef": 1.1,
        "depth_coef": 1.2,
        "image_size": 260,
        "dropout_rate": 0.3,
        "dw_padding": [5, 8, 16],
    },
    "b3": {
        "hidden_dim": 1536,
        "width_coef": 1.2,
        "depth_coef": 1.4,
        "image_size": 300,
        "dropout_rate": 0.3,
        "dw_padding": [5, 18],
    },
    "b4": {
        "hidden_dim": 1792,
        "width_coef": 1.4,
        "depth_coef": 1.8,
        "image_size": 380,
        "dropout_rate": 0.4,
        "dw_padding": [6],
    },
    "b5": {
        "hidden_dim": 2048,
        "width_coef": 1.6,
        "depth_coef": 2.2,
        "image_size": 456,
        "dropout_rate": 0.4,
        "dw_padding": [13, 27],
    },
    "b6": {
        "hidden_dim": 2304,
        "width_coef": 1.8,
        "depth_coef": 2.6,
        "image_size": 528,
        "dropout_rate": 0.5,
        "dw_padding": [31],
    },
    "b7": {
        "hidden_dim": 2560,
        "width_coef": 2.0,
        "depth_coef": 3.1,
        "image_size": 600,
        "dropout_rate": 0.5,
        "dw_padding": [18],
    },
}
def get_efficientnet_config(model_name):
    config = EfficientNetConfig()
    config.hidden_dim = CONFIG_MAP[model_name]["hidden_dim"]
    config.width_coefficient = CONFIG_MAP[model_name]["width_coef"]
    config.depth_coefficient = CONFIG_MAP[model_name]["depth_coef"]
    config.image_size = CONFIG_MAP[model_name]["image_size"]
    config.dropout_rate = CONFIG_MAP[model_name]["dropout_rate"]
    config.depthwise_padding = CONFIG_MAP[model_name]["dw_padding"]

    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    config.num_labels = 1000
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}

    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
# We will verify our results on an image of cute cats
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
def convert_image_processor(model_name):
    size = CONFIG_MAP[model_name]["image_size"]
    preprocessor = EfficientNetImageProcessor(
        size={"height": size, "width": size},
        image_mean=[0.485, 0.456, 0.406],
        image_std=[0.47853944, 0.4732864, 0.47434163],
        do_center_crop=False,
    )
    return preprocessor
def rename_keys(original_param_names):
    block_names = [v.split("_")[0].split("block")[1] for v in original_param_names if v.startswith("block")]
    block_names = sorted(set(block_names))
    num_blocks = len(block_names)
    block_name_mapping = {b: str(i) for b, i in zip(block_names, range(num_blocks))}

    rename_keys = []
rename_keys.append(('stem_conv/kernel:0', 'embeddings.convolution.weight') )
rename_keys.append(('stem_bn/gamma:0', 'embeddings.batchnorm.weight') )
rename_keys.append(('stem_bn/beta:0', 'embeddings.batchnorm.bias') )
rename_keys.append(('stem_bn/moving_mean:0', 'embeddings.batchnorm.running_mean') )
rename_keys.append(('stem_bn/moving_variance:0', 'embeddings.batchnorm.running_var') )
for b in block_names:
        hf_b = block_name_mapping[b]
rename_keys.append((F"""block{b}_expand_conv/kernel:0""", F"""encoder.blocks.{hf_b}.expansion.expand_conv.weight""") )
rename_keys.append((F"""block{b}_expand_bn/gamma:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.weight""") )
rename_keys.append((F"""block{b}_expand_bn/beta:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.bias""") )
rename_keys.append(
(F"""block{b}_expand_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_mean""") )
rename_keys.append(
(F"""block{b}_expand_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_var""") )
rename_keys.append(
(F"""block{b}_dwconv/depthwise_kernel:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight""") )
rename_keys.append((F"""block{b}_bn/gamma:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight""") )
rename_keys.append((F"""block{b}_bn/beta:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias""") )
rename_keys.append(
(F"""block{b}_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean""") )
rename_keys.append(
(F"""block{b}_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var""") )
rename_keys.append((F"""block{b}_se_reduce/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.weight""") )
rename_keys.append((F"""block{b}_se_reduce/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.bias""") )
rename_keys.append((F"""block{b}_se_expand/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.weight""") )
rename_keys.append((F"""block{b}_se_expand/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.bias""") )
rename_keys.append(
(F"""block{b}_project_conv/kernel:0""", F"""encoder.blocks.{hf_b}.projection.project_conv.weight""") )
rename_keys.append((F"""block{b}_project_bn/gamma:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.weight""") )
rename_keys.append((F"""block{b}_project_bn/beta:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.bias""") )
rename_keys.append(
(F"""block{b}_project_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_mean""") )
rename_keys.append(
(F"""block{b}_project_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_var""") )
rename_keys.append(('top_conv/kernel:0', 'encoder.top_conv.weight') )
rename_keys.append(('top_bn/gamma:0', 'encoder.top_bn.weight') )
rename_keys.append(('top_bn/beta:0', 'encoder.top_bn.bias') )
rename_keys.append(('top_bn/moving_mean:0', 'encoder.top_bn.running_mean') )
rename_keys.append(('top_bn/moving_variance:0', 'encoder.top_bn.running_var') )
    key_mapping = {}
    for item in rename_keys:
        if item[0] in original_param_names:
            key_mapping[item[0]] = "efficientnet." + item[1]

    key_mapping["predictions/kernel:0"] = "classifier.weight"
    key_mapping["predictions/bias:0"] = "classifier.bias"
    return key_mapping
def replace_params(hf_params, tf_params, key_mapping):
    for key, value in tf_params.items():
        if "normalization" in key:
            continue

        hf_key = key_mapping[key]
        if "_conv" in key and "kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(3, 2, 0, 1)
        elif "depthwise_kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(2, 3, 0, 1)
        elif "kernel" in key:
            new_hf_value = torch.from_numpy(np.transpose(value))
        else:
            new_hf_value = torch.from_numpy(value)

        # Replace HF parameters with original TF model parameters
        assert hf_params[hf_key].shape == new_hf_value.shape
        hf_params[hf_key].copy_(new_hf_value)
@torch.no_grad()
def convert_efficientnet_checkpoint(model_name, pytorch_dump_folder_path, save_model, push_to_hub):
    # Load the original TF model and collect its parameters by name
    original_model = model_classes[model_name](
        include_top=True,
        weights="imagenet",
        input_tensor=None,
        input_shape=None,
        pooling=None,
        classes=1000,
        classifier_activation="softmax",
    )

    tf_params = original_model.trainable_variables
    tf_non_train_params = original_model.non_trainable_variables
    tf_params = {param.name: param.numpy() for param in tf_params}
    for param in tf_non_train_params:
        tf_params[param.name] = param.numpy()
    tf_param_names = list(tf_params.keys())

    # Load HuggingFace model
    config = get_efficientnet_config(model_name)
    hf_model = EfficientNetForImageClassification(config).eval()
    hf_params = hf_model.state_dict()

    # Create src-to-dst parameter name mapping dictionary
    print("Converting parameters...")
    key_mapping = rename_keys(tf_param_names)
    replace_params(hf_params, tf_params, key_mapping)

    # Initialize preprocessor and preprocess input image
    preprocessor = convert_image_processor(model_name)
    inputs = preprocessor(images=prepare_img(), return_tensors="pt")

    # HF model inference
    hf_model.eval()
    with torch.no_grad():
        outputs = hf_model(**inputs)
    hf_logits = outputs.logits.detach().numpy()

    # Original model inference
    original_model.trainable = False
    image_size = CONFIG_MAP[model_name]["image_size"]
    img = prepare_img().resize((image_size, image_size), resample=PIL.Image.NEAREST)
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    original_logits = original_model.predict(x)

    # Check whether original and HF model outputs match -> np.allclose
    assert np.allclose(original_logits, hf_logits, atol=1e-3), "The predicted logits are not the same."
    print("Model outputs match!")

    if save_model:
        # Create folder to save model
        if not os.path.isdir(pytorch_dump_folder_path):
            os.mkdir(pytorch_dump_folder_path)
        # Save converted model and image processor
        hf_model.save_pretrained(pytorch_dump_folder_path)
        preprocessor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model and image processor to hub
        print(f"Pushing converted {model_name} to the hub...")
        hub_model_name = f"efficientnet-{model_name}"
        preprocessor.push_to_hub(hub_model_name)
        hf_model.push_to_hub(hub_model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''b0''',
type=str,
help='''Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''hf_model''',
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--save_model''', action='''store_true''', help='''Save model to local''')
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
    args = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
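
# Example invocation (the script name is whatever this file is saved as; the
# flags come from the argparse setup above):
#
#   python convert_efficientnet_to_pytorch.py --model_name b0 \
#       --pytorch_dump_folder_path hf_model --save_model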
| 119 | 0 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "YituTech/conv-bert-base": "https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt",
        "YituTech/conv-bert-medium-small": (
            "https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt"
        ),
        "YituTech/conv-bert-small": "https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "YituTech/conv-bert-base": 512,
    "YituTech/conv-bert-medium-small": 512,
    "YituTech/conv-bert-small": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "YituTech/conv-bert-base": {"do_lower_case": True},
    "YituTech/conv-bert-medium-small": {"do_lower_case": True},
    "YituTech/conv-bert-small": {"do_lower_case": True},
}


class ConvBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ConvBertTokenizer
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
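
# Minimal usage sketch (fetches the vocab from the Hub on first call, so it
# needs network access):
#
#   tokenizer = ConvBertTokenizerFast.from_pretrained("YituTech/conv-bert-base")
#   tokenizer("Hello world")["input_ids"]  # [CLS] ... [SEP] ids, per the methods above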
| 371 |
import collections
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "prophetnet.tokenizer"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "microsoft/xprophetnet-large-wiki100-cased": (
            "https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer"
        ),
    }
}

PRETRAINED_INIT_CONFIGURATION = {
    "microsoft/xprophetnet-large-wiki100-cased": {"do_lower_case": False},
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "microsoft/xprophetnet-large-wiki100-cased": 512,
}
def load_vocab(vocab_file):
    """Loads a vocabulary file into a dictionary."""
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        token = token.rstrip("\n")
        vocab[token] = index
    return vocab
class XLMProphetNetTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="[SEP]",
        eos_token="[SEP]",
        sep_token="[SEP]",
        unk_token="[UNK]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        sp_model_kwargs=None,
        **kwargs,
    ):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            unk_token=unk_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        try:
            import sentencepiece as spm
        except ImportError:
            logger.warning(
                "You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
                " pip install sentencepiece"
            )
            raise

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # put special tokens and [unused] tokens into the vocab
        self.fairseq_tokens_to_ids = {"[PAD]": 0, "[CLS]": 1, "[SEP]": 2, "[UNK]": 3, "[MASK]": 4}

        for i in range(10):
            tok = f"[unused{i}]"
            self.fairseq_tokens_to_ids[tok] = 5 + i

        # The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab
        self.fairseq_offset = 12
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        for k in self.fairseq_tokens_to_ids.keys():
            self.unique_no_split_tokens.append(k)
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        try:
            import sentencepiece as spm
        except ImportError:
            logger.warning(
                "You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
                " pip install sentencepiece"
            )
            raise

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + [1]
        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0]
        return len(token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.sp_model) + self.fairseq_offset

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text):
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) into an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) into a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings for sub-words) into a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return token_ids_0 + [self.sep_token_id]
        sep = [self.sep_token_id]
        return token_ids_0 + sep + token_ids_1 + sep
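
# Sanity check of the fairseq-offset bookkeeping above, given a constructed
# tokenizer `tok` (no sentencepiece model is needed for the first two lines):
#
#   tok.fairseq_tokens_to_ids["[PAD]"]      # -> 0
#   tok.fairseq_tokens_to_ids["[unused9]"]  # -> 14
#   tok._convert_id_to_token(15)            # -> sp_model.IdToPiece(15 - 12), i.e. ","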
| 97 | 0 |
from typing import TYPE_CHECKING
from ....utils import _LazyModule
_import_structure = {"tokenization_tapex": ["TapexTokenizer"]}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 49 |
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def deprecate(*args, take_from: Optional[Union[Dict, Any]] = None, standard_warn=True, stacklevel=2):
    from .. import __version__

    deprecated_kwargs = take_from
    values = ()
    if not isinstance(args[0], tuple):
        args = (args,)

    for attribute, version_name, message in args:
        if version.parse(version.parse(__version__).base_version) >= version.parse(version_name):
            raise ValueError(
                f"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"
                f" version {__version__} is >= {version_name}"
            )

        warning = None
        if isinstance(deprecated_kwargs, dict) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute),)
            warning = f"The `{attribute}` argument is deprecated and will be removed in version {version_name}."
        elif hasattr(deprecated_kwargs, attribute):
            values += (getattr(deprecated_kwargs, attribute),)
            warning = f"The `{attribute}` attribute is deprecated and will be removed in version {version_name}."
        elif deprecated_kwargs is None:
            warning = f"`{attribute}` is deprecated and will be removed in version {version_name}."

        if warning is not None:
            warning = warning + " " if standard_warn else ""
            warnings.warn(warning + message, FutureWarning, stacklevel=stacklevel)

    if isinstance(deprecated_kwargs, dict) and len(deprecated_kwargs) > 0:
        call_frame = inspect.getouterframes(inspect.currentframe())[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key, value = next(iter(deprecated_kwargs.items()))
        raise TypeError(f"{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`")

    if len(values) == 0:
        return
    elif len(values) == 1:
        return values[0]
    return values
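
# Usage sketch for deprecate() above; `old_option`/`new_option` are illustrative
# names, not real diffusers arguments. A renamed kwarg is popped from **kwargs
# (with a FutureWarning) while the new name takes precedence:
#
#   def do_something(new_option=None, **kwargs):
#       old = deprecate("old_option", "1.0.0", "Use `new_option` instead.", take_from=kwargs)
#       new_option = new_option if new_option is not None else old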
| 329 | 0 |
'''simple docstring'''
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor as a nested list."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2_000,
        feature_size=10,
        hop_length=160,
        chunk_length=8,
        padding_value=0.0,
        sampling_rate=4_000,
        return_attention_mask=False,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
        self.feature_size = feature_size
        self.chunk_length = chunk_length
        self.hop_length = hop_length

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "hop_length": self.hop_length,
            "chunk_length": self.chunk_length,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = WhisperFeatureExtractor if is_speech_available() else None

    def setUp(self):
        self.feat_extract_tester = WhisperFeatureExtractionTester(self)

    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = feat_extract_first.mel_filters
        mel_second = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_first, mel_second))
        self.assertEqual(dict_first, dict_second)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = feat_extract_first.mel_filters
        mel_second = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_first, mel_second))
        self.assertEqual(dict_first, dict_second)

    def test_call(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1_400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test feature size
        input_features = feature_extractor(np_speech_inputs, padding="max_length", return_tensors="np").input_features
        self.assertTrue(input_features.ndim == 3)
        self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames)
        self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size)

        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_features
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test truncation required
        speech_inputs = [floats_list((1, x))[0] for x in range(200, (feature_extractor.n_samples + 500), 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        speech_inputs_truncated = [x[: feature_extractor.n_samples] for x in speech_inputs]
        np_speech_inputs_truncated = [np.asarray(speech_input) for speech_input in speech_inputs_truncated]

        encoded_sequences_1 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs_truncated, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100, 32).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_features.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_features.dtype == torch.float32)

    def _load_datasamples(self, num_samples):
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]

    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_FEATURES = torch.tensor(
            [
                0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
                0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
                0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
                -0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
            ]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = WhisperFeatureExtractor()
        input_features = feature_extractor(input_speech, return_tensors="pt").input_features
        self.assertEqual(input_features.shape, (1, 80, 3_000))
        self.assertTrue(torch.allclose(input_features[0, 0, :30], EXPECTED_INPUT_FEATURES, atol=1e-4))

    def test_zero_mean_unit_variance_normalization_trunc_np_longest(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        audio = self._load_datasamples(1)[0]
        audio = ((audio - audio.min()) / (audio.max() - audio.min())) * 65_535  # Rescale to [0, 65535] to show issue
        audio = feat_extract.zero_mean_unit_var_norm([audio], attention_mask=None)[0]

        self.assertTrue(np.all(np.mean(audio) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(audio) - 1) < 1e-3))
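
# Using the feature extractor directly, mirroring test_integration above; with
# the default config the output shape is (batch, 80 mel bins, 3000 frames):
#
#   fe = WhisperFeatureExtractor()
#   feats = fe(raw_audio_array, return_tensors="pt").input_features  # (1, 80, 3000)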
| 363 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class CycleDiffusionPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = CycleDiffusionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        "negative_prompt",
        "height",
        "width",
        "negative_prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"source_prompt"})
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            num_train_timesteps=1_000,
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1_000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image / 2 + 0.5
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "An astronaut riding an elephant",
            "source_prompt": "An astronaut riding a horse",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "eta": 0.1,
            "strength": 0.8,
            "guidance_scale": 3,
            "source_guidance_scale": 1,
            "output_type": "numpy",
        }
        return inputs
def lowerCamelCase ( self : Tuple ):
snake_case__ : str = """cpu""" # ensure determinism for the device-dependent torch.Generator
snake_case__ : Optional[Any] = self.get_dummy_components()
snake_case__ : List[Any] = CycleDiffusionPipeline(**snake_case_ )
snake_case__ : List[str] = pipe.to(snake_case_ )
pipe.set_progress_bar_config(disable=snake_case_ )
snake_case__ : Tuple = self.get_dummy_inputs(snake_case_ )
snake_case__ : Any = pipe(**snake_case_ )
snake_case__ : Tuple = output.images
snake_case__ : Optional[int] = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
snake_case__ : List[Any] = np.array([0.4459, 0.4943, 0.4544, 0.6643, 0.5474, 0.4327, 0.5701, 0.5959, 0.5179] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" )
def lowerCamelCase ( self : List[str] ):
snake_case__ : List[str] = self.get_dummy_components()
for name, module in components.items():
if hasattr(snake_case_ , """half""" ):
snake_case__ : Optional[int] = module.half()
snake_case__ : List[Any] = CycleDiffusionPipeline(**snake_case_ )
snake_case__ : Union[str, Any] = pipe.to(snake_case_ )
pipe.set_progress_bar_config(disable=snake_case_ )
snake_case__ : int = self.get_dummy_inputs(snake_case_ )
snake_case__ : List[Any] = pipe(**snake_case_ )
snake_case__ : Any = output.images
snake_case__ : Optional[Any] = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
snake_case__ : List[Any] = np.array([0.3506, 0.4543, 0.446, 0.4575, 0.5195, 0.4155, 0.5273, 0.518, 0.4116] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@skip_mps
def lowerCamelCase ( self : List[Any] ):
return super().test_save_load_local()
@unittest.skip("""non-deterministic pipeline""" )
def lowerCamelCase ( self : int ):
return super().test_inference_batch_single_identical()
@skip_mps
def lowerCamelCase ( self : Dict ):
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def lowerCamelCase ( self : int ):
return super().test_save_load_optional_components()
@skip_mps
def lowerCamelCase ( self : Optional[int] ):
return super().test_attention_slicing_forward_pass()
@slow
@require_torch_gpu
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def lowerCamelCase ( self : Dict ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase ( self : str ):
snake_case__ : Union[str, Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/cycle-diffusion/black_colored_car.png""" )
snake_case__ : Tuple = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy""" )
snake_case__ : Tuple = init_image.resize((512, 512) )
snake_case__ : List[Any] = """CompVis/stable-diffusion-v1-4"""
snake_case__ : Tuple = DDIMScheduler.from_pretrained(snake_case_ , subfolder="""scheduler""" )
snake_case__ : Optional[int] = CycleDiffusionPipeline.from_pretrained(
snake_case_ , scheduler=snake_case_ , safety_checker=snake_case_ , torch_dtype=torch.floataa , revision="""fp16""" )
pipe.to(snake_case_ )
pipe.set_progress_bar_config(disable=snake_case_ )
pipe.enable_attention_slicing()
snake_case__ : int = """A black colored car"""
snake_case__ : int = """A blue colored car"""
snake_case__ : Dict = torch.manual_seed(0 )
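        # edit the init image: move it away from the source prompt and toward the target prompt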
snake_case__ : Dict = pipe(
prompt=snake_case_ , source_prompt=snake_case_ , image=snake_case_ , num_inference_steps=100 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=snake_case_ , output_type="""np""" , )
snake_case__ : Union[str, Any] = output.images
# the values aren't exactly equal, but the images look the same visually
assert np.abs(image - expected_image ).max() < 5E-1
def lowerCamelCase ( self : int ):
snake_case__ : Any = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/cycle-diffusion/black_colored_car.png""" )
snake_case__ : Optional[int] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy""" )
snake_case__ : Dict = init_image.resize((512, 512) )
snake_case__ : Tuple = """CompVis/stable-diffusion-v1-4"""
snake_case__ : List[Any] = DDIMScheduler.from_pretrained(snake_case_ , subfolder="""scheduler""" )
snake_case__ : str = CycleDiffusionPipeline.from_pretrained(snake_case_ , scheduler=snake_case_ , safety_checker=snake_case_ )
pipe.to(snake_case_ )
pipe.set_progress_bar_config(disable=snake_case_ )
pipe.enable_attention_slicing()
snake_case__ : Tuple = """A black colored car"""
snake_case__ : List[Any] = """A blue colored car"""
snake_case__ : Optional[Any] = torch.manual_seed(0 )
snake_case__ : Any = pipe(
prompt=snake_case_ , source_prompt=snake_case_ , image=snake_case_ , num_inference_steps=100 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=snake_case_ , output_type="""np""" , )
snake_case__ : List[Any] = output.images
assert np.abs(image - expected_image ).max() < 2E-2
| 43 | 0 |
"""simple docstring"""
from __future__ import annotations
from math import ceil, floor, sqrt
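# The grid search below uses triangle numbers T(n) = n * (n + 1) / 2: for each T(a),
# T(a) * T(b) = target gives b^2 + b - 2 * target / T(a) = 0, so by the quadratic
# formula b = (-1 + sqrt(1 + 8 * target / T(a))) / 2, and both nearest integers are tested.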
def solution(target: int = 200_0000) -> int:
    triangle_numbers: list[int] = [0]
    idx: int
    for idx in range(1, ceil(sqrt(target * 2) * 1.1)):
        triangle_numbers.append(triangle_numbers[-1] + idx)
    # we want this to be as close as possible to target
    best_product: int = 0
    # the area corresponding to the grid that gives the product closest to target
    area: int = 0
    # an estimate of b, using the quadratic formula
    b_estimate: float
    # the largest integer less than b_estimate
    b_floor: int
    # the smallest integer greater than b_estimate
    b_ceil: int
    # the triangle number corresponding to b_floor
    triangle_b_first_guess: int
    # the triangle number corresponding to b_ceil
    triangle_b_second_guess: int
    for idx_a, triangle_a in enumerate(triangle_numbers[1:], 1):
        b_estimate = (-1 + sqrt(1 + 8 * target / triangle_a)) / 2
        b_floor = floor(b_estimate)
        b_ceil = ceil(b_estimate)
        triangle_b_first_guess = triangle_numbers[b_floor]
        triangle_b_second_guess = triangle_numbers[b_ceil]
        if abs(target - triangle_b_first_guess * triangle_a) < abs(
            target - best_product
        ):
            best_product = triangle_b_first_guess * triangle_a
            area = idx_a * b_floor
        if abs(target - triangle_b_second_guess * triangle_a) < abs(
            target - best_product
        ):
            best_product = triangle_b_second_guess * triangle_a
            area = idx_a * b_ceil
    return area
if __name__ == "__main__":
print(F"""{solution() = }""")
| 100 |
"""simple docstring"""
from typing import List, Union
import numpy as np
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, logging
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline
_a = logging.get_logger(__name__)
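# The argument handler normalizes user-supplied labels and builds one
# (sequence, hypothesis) premise/hypothesis pair per candidate label for the NLI model.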
class ZeroShotClassificationArgumentHandler( ArgumentHandler ):
    def _parse_labels(self, labels):
        '''simple docstring'''
        if isinstance(labels, str):
            labels = [label.strip() for label in labels.split(",") if label.strip()]
        return labels
    def __call__(self, sequences, labels, hypothesis_template):
        '''simple docstring'''
        if len(labels) == 0 or len(sequences) == 0:
            raise ValueError("You must include at least one label and at least one sequence.")
        if hypothesis_template.format(labels[0]) == hypothesis_template:
            raise ValueError(
                (
                    'The provided hypothesis_template "{}" was not able to be formatted with the target labels. '
                    "Make sure the passed template includes formatting syntax such as {{}} where the label should go."
                ).format(hypothesis_template))
        if isinstance(sequences, str):
            sequences = [sequences]
        sequence_pairs = []
        for sequence in sequences:
            sequence_pairs.extend([[sequence, hypothesis_template.format(label)] for label in labels])
        return sequence_pairs, sequences
@add_end_docstrings(PIPELINE_INIT_ARGS )
class ZeroShotClassificationPipeline( ChunkPipeline ):
    def __init__(self, args_parser=ZeroShotClassificationArgumentHandler(), *args, **kwargs):
        '''simple docstring'''
        self._args_parser = args_parser
        super().__init__(*args, **kwargs)
        if self.entailment_id == -1:
            logger.warning(
                "Failed to determine 'entailment' label id from the label2id mapping in the model config. Setting to "
                "-1. Define a descriptive label2id mapping in the model config to ensure correct outputs.")
@property
    def entailment_id(self) -> int:
        '''simple docstring'''
        for label, ind in self.model.config.label2id.items():
            if label.lower().startswith("entail"):
                return ind
        return -1
    def _parse_and_tokenize(self, sequence_pairs, padding=True, add_special_tokens=True, truncation=TruncationStrategy.ONLY_FIRST, **kwargs):
        '''simple docstring'''
        return_tensors = self.framework
        if self.tokenizer.pad_token is None:
            # Override for tokenizers not supporting padding
            logger.error(
                "Tokenizer does not support padding, which is necessary for zero-shot classification; "
                "attempting to use `pad_token=eos_token`")
            self.tokenizer.pad_token = self.tokenizer.eos_token
        try:
            inputs = self.tokenizer(
                sequence_pairs, add_special_tokens=add_special_tokens, return_tensors=return_tensors, padding=padding, truncation=truncation, )
        except Exception as e:
            if "too short" in str(e):
                # tokenizers might yell that we want to truncate
                # to a value that is not even reached by the input.
                # In that case we don't want to truncate.
                # It seems there's not a really better way to catch that
                # exception.
                inputs = self.tokenizer(
                    sequence_pairs, add_special_tokens=add_special_tokens, return_tensors=return_tensors, padding=padding, truncation=TruncationStrategy.DO_NOT_TRUNCATE, )
            else:
                raise e
        return inputs
    def _sanitize_parameters(self, **kwargs):
        '''simple docstring'''
        if kwargs.get("multi_class", None) is not None:
            kwargs["multi_label"] = kwargs["multi_class"]
            logger.warning(
                "The `multi_class` argument has been deprecated and renamed to `multi_label`. "
                "`multi_class` will be removed in a future version of Transformers.")
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = self._args_parser._parse_labels(kwargs["candidate_labels"])
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        postprocess_params = {}
        if "multi_label" in kwargs:
            postprocess_params["multi_label"] = kwargs["multi_label"]
        return preprocess_params, {}, postprocess_params
    def __call__(self, sequences, *args, **kwargs):
        '''simple docstring'''
        if len(args) == 0:
            pass
        elif len(args) == 1 and "candidate_labels" not in kwargs:
            kwargs["candidate_labels"] = args[0]
        else:
            raise ValueError(f"Unable to understand extra arguments {args}")
        return super().__call__(sequences, **kwargs)
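    # preprocess() yields one tokenized NLI (premise, hypothesis) pair per candidate label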
    def preprocess(self, inputs, candidate_labels=None, hypothesis_template="This example is {}."):
        '''simple docstring'''
        sequence_pairs, sequences = self._args_parser(inputs, candidate_labels, hypothesis_template)
        for i, (candidate_label, sequence_pair) in enumerate(zip(candidate_labels, sequence_pairs)):
            model_input = self._parse_and_tokenize([sequence_pair])
            yield {
                "candidate_label": candidate_label,
                "sequence": sequences[0],
                "is_last": i == len(candidate_labels) - 1,
                **model_input,
            }
    def _forward(self, inputs):
        '''simple docstring'''
        candidate_label = inputs["candidate_label"]
        sequence = inputs["sequence"]
        model_inputs = {k: inputs[k] for k in self.tokenizer.model_input_names}
        outputs = self.model(**model_inputs)
        model_outputs = {
            "candidate_label": candidate_label,
            "sequence": sequence,
            "is_last": inputs["is_last"],
            **outputs,
        }
        return model_outputs
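    # postprocess() reshapes the per-pair logits to (num_sequences, num_labels, num_model_labels)
    # and converts them to label scores with one of the two softmax schemes below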
    def postprocess(self, model_outputs, multi_label=False):
        '''simple docstring'''
        candidate_labels = [outputs["candidate_label"] for outputs in model_outputs]
        sequences = [outputs["sequence"] for outputs in model_outputs]
        logits = np.concatenate([output["logits"].numpy() for output in model_outputs])
        N = logits.shape[0]
        n = len(candidate_labels)
        num_sequences = N // n
        reshaped_outputs = logits.reshape((num_sequences, n, -1))
        if multi_label or len(candidate_labels) == 1:
            # softmax over the entailment vs. contradiction dim for each label independently
            entailment_id = self.entailment_id
            contradiction_id = -1 if entailment_id == 0 else 0
            entail_contr_logits = reshaped_outputs[..., [contradiction_id, entailment_id]]
            scores = np.exp(entail_contr_logits) / np.exp(entail_contr_logits).sum(-1, keepdims=True)
            scores = scores[..., 1]
        else:
            # softmax the "entailment" logits over all candidate labels
            entail_logits = reshaped_outputs[..., self.entailment_id]
            scores = np.exp(entail_logits) / np.exp(entail_logits).sum(-1, keepdims=True)
        top_inds = list(reversed(scores[0].argsort()))
        return {
            "sequence": sequences[0],
            "labels": [candidate_labels[i] for i in top_inds],
            "scores": scores[0, top_inds].tolist(),
        }
| 194 | 0 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
A_ : Union[str, Any] = logging.get_logger(__name__)
class lowercase ( BaseImageProcessor ):
"""simple docstring"""
UpperCAmelCase = ["""pixel_values"""]
def __init__( self ,a_ = True ,a_ = None ,a_ = PILImageResampling.BICUBIC ,a_ = True ,a_ = True ,a_ = 1 / 255 ,a_ = None ,a_ = True ,a_ = None ,a_ = None ,**a_ ,) -> None:
super().__init__(**a_ )
_UpperCAmelCase : Tuple = size if size is not None else {"""height""": 224, """width""": 224}
_UpperCAmelCase : Optional[Any] = get_size_dict(a_ )
_UpperCAmelCase : Tuple = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
_UpperCAmelCase : Optional[int] = get_size_dict(a_ ,default_to_square=a_ ,param_name="""crop_size""" )
_UpperCAmelCase : List[Any] = do_resize
_UpperCAmelCase : str = do_rescale
_UpperCAmelCase : str = do_normalize
_UpperCAmelCase : Any = do_center_crop
_UpperCAmelCase : List[str] = crop_size
_UpperCAmelCase : Any = size
_UpperCAmelCase : List[str] = resample
_UpperCAmelCase : Union[str, Any] = rescale_factor
_UpperCAmelCase : Any = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
_UpperCAmelCase : Any = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def _snake_case ( self ,a_ ,a_ ,a_ = PILImageResampling.BILINEAR ,a_ = None ,**a_ ,) -> np.ndarray:
_UpperCAmelCase : Optional[Any] = get_size_dict(a_ )
if "shortest_edge" in size:
_UpperCAmelCase : Any = get_resize_output_image_size(a_ ,size=size["""shortest_edge"""] ,default_to_square=a_ )
# size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
elif "height" in size and "width" in size:
_UpperCAmelCase : str = (size["""height"""], size["""width"""])
else:
raise ValueError(f'''Size must contain \'height\' and \'width\' keys or \'shortest_edge\' key. Got {size.keys()}''' )
return resize(a_ ,size=a_ ,resample=a_ ,data_format=a_ ,**a_ )
def _snake_case ( self ,a_ ,a_ ,a_ = None ,**a_ ,) -> np.ndarray:
_UpperCAmelCase : Any = get_size_dict(a_ )
if "height" not in size or "width" not in size:
raise ValueError(f'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''' )
return center_crop(a_ ,size=(size["""height"""], size["""width"""]) ,data_format=a_ ,**a_ )
def _snake_case ( self ,a_ ,a_ ,a_ = None ,**a_ ) -> np.ndarray:
return rescale(a_ ,scale=a_ ,data_format=a_ ,**a_ )
def _snake_case ( self ,a_ ,a_ ,a_ ,a_ = None ,**a_ ,) -> np.ndarray:
return normalize(a_ ,mean=a_ ,std=a_ ,data_format=a_ ,**a_ )
def _snake_case ( self ,a_ ,a_ = None ,a_ = None ,a_ = None ,a_ = None ,a_ = None ,a_ = None ,a_ = None ,a_ = None ,a_ = None ,a_ = None ,a_ = None ,a_ = ChannelDimension.FIRST ,**a_ ,) -> BatchFeature:
_UpperCAmelCase : str = do_resize if do_resize is not None else self.do_resize
_UpperCAmelCase : List[str] = do_rescale if do_rescale is not None else self.do_rescale
_UpperCAmelCase : List[str] = do_normalize if do_normalize is not None else self.do_normalize
_UpperCAmelCase : int = do_center_crop if do_center_crop is not None else self.do_center_crop
_UpperCAmelCase : Any = crop_size if crop_size is not None else self.crop_size
_UpperCAmelCase : List[Any] = get_size_dict(a_ ,param_name="""crop_size""" ,default_to_square=a_ )
_UpperCAmelCase : Tuple = resample if resample is not None else self.resample
_UpperCAmelCase : Union[str, Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
_UpperCAmelCase : Union[str, Any] = image_mean if image_mean is not None else self.image_mean
_UpperCAmelCase : int = image_std if image_std is not None else self.image_std
_UpperCAmelCase : List[str] = size if size is not None else self.size
_UpperCAmelCase : Tuple = get_size_dict(a_ )
if not is_batched(a_ ):
_UpperCAmelCase : Optional[int] = [images]
if not valid_images(a_ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
# All transformations expect numpy arrays.
_UpperCAmelCase : str = [to_numpy_array(a_ ) for image in images]
if do_resize:
_UpperCAmelCase : Tuple = [self.resize(image=a_ ,size=a_ ,resample=a_ ) for image in images]
if do_center_crop:
_UpperCAmelCase : Optional[Any] = [self.center_crop(image=a_ ,size=a_ ) for image in images]
if do_rescale:
_UpperCAmelCase : Optional[int] = [self.rescale(image=a_ ,scale=a_ ) for image in images]
if do_normalize:
_UpperCAmelCase : Optional[int] = [self.normalize(image=a_ ,mean=a_ ,std=a_ ) for image in images]
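        # convert every image to the requested channel layout (channels-first by default)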
_UpperCAmelCase : Any = [to_channel_dimension_format(a_ ,a_ ) for image in images]
_UpperCAmelCase : List[str] = {"""pixel_values""": images}
return BatchFeature(data=a_ ,tensor_type=a_ )
| 349 |
'''simple docstring'''
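# Two-ended linear search: compare the key against both ends of the window,
# then recurse with the window narrowed by one element on each side.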
def search(list_data: list, key: int, left: int = 0, right: int = 0) -> int:
    '''simple docstring'''
    right = right or len(list_data) - 1
    if left > right:
        return -1
    elif list_data[left] == key:
        return left
    elif list_data[right] == key:
        return right
    else:
        return search(list_data, key, left + 1, right - 1)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 349 | 1 |
"""simple docstring"""
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class TaFilmDecoder( ModelMixin, ConfigMixin ):
"""simple docstring"""
@register_to_config
def __init__( self: str , UpperCamelCase: int = 1_28 , UpperCamelCase: int = 2_56 , UpperCamelCase: float = 2_000.0 , UpperCamelCase: int = 7_68 , UpperCamelCase: int = 12 , UpperCamelCase: int = 12 , UpperCamelCase: int = 64 , UpperCamelCase: int = 20_48 , UpperCamelCase: float = 0.1 , ):
"""simple docstring"""
super().__init__()
A__ = nn.Sequential(
nn.Linear(UpperCamelCase , d_model * 4 , bias=UpperCamelCase ) , nn.SiLU() , nn.Linear(d_model * 4 , d_model * 4 , bias=UpperCamelCase ) , nn.SiLU() , )
A__ = nn.Embedding(UpperCamelCase , UpperCamelCase )
A__ = False
A__ = nn.Linear(UpperCamelCase , UpperCamelCase , bias=UpperCamelCase )
A__ = nn.Dropout(p=UpperCamelCase )
A__ = nn.ModuleList()
for lyr_num in range(UpperCamelCase ):
# FiLM conditional T5 decoder
A__ = DecoderLayer(d_model=UpperCamelCase , d_kv=UpperCamelCase , num_heads=UpperCamelCase , d_ff=UpperCamelCase , dropout_rate=UpperCamelCase )
self.decoders.append(UpperCamelCase )
A__ = TaLayerNorm(UpperCamelCase )
A__ = nn.Dropout(p=UpperCamelCase )
A__ = nn.Linear(UpperCamelCase , UpperCamelCase , bias=UpperCamelCase )
def UpperCamelCase ( self: Dict , UpperCamelCase: int , UpperCamelCase: int ):
"""simple docstring"""
A__ = torch.mul(query_input.unsqueeze(-1 ) , key_input.unsqueeze(-2 ) )
return mask.unsqueeze(-3 )
def UpperCamelCase ( self: Tuple , UpperCamelCase: Optional[Any] , UpperCamelCase: List[Any] , UpperCamelCase: Any ):
"""simple docstring"""
A__ , A__ , A__ = decoder_input_tokens.shape
assert decoder_noise_time.shape == (batch,)
# decoder_noise_time is in [0, 1), so rescale to expected timing range.
A__ = get_timestep_embedding(
decoder_noise_time * self.config.max_decoder_noise_time , embedding_dim=self.config.d_model , max_period=self.config.max_decoder_noise_time , ).to(dtype=self.dtype )
A__ = self.conditioning_emb(UpperCamelCase ).unsqueeze(1 )
assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
A__ = decoder_input_tokens.shape[1]
# If we want to use relative positions for audio context, we can just offset
# this sequence by the length of encodings_and_masks.
A__ = torch.broadcast_to(
torch.arange(UpperCamelCase , device=decoder_input_tokens.device ) , (batch, seq_length) , )
A__ = self.position_encoding(UpperCamelCase )
A__ = self.continuous_inputs_projection(UpperCamelCase )
inputs += position_encodings
A__ = self.dropout(UpperCamelCase )
# decoder: No padding present.
A__ = torch.ones(
decoder_input_tokens.shape[:2] , device=decoder_input_tokens.device , dtype=inputs.dtype )
# Translate encoding masks to encoder-decoder masks.
A__ = [(x, self.encoder_decoder_mask(UpperCamelCase , UpperCamelCase )) for x, y in encodings_and_masks]
# cross attend style: concat encodings
A__ = torch.cat([x[0] for x in encodings_and_encdec_masks] , dim=1 )
A__ = torch.cat([x[1] for x in encodings_and_encdec_masks] , dim=-1 )
for lyr in self.decoders:
A__ = lyr(
UpperCamelCase , conditioning_emb=UpperCamelCase , encoder_hidden_states=UpperCamelCase , encoder_attention_mask=UpperCamelCase , )[0]
A__ = self.decoder_norm(UpperCamelCase )
A__ = self.post_dropout(UpperCamelCase )
A__ = self.spec_out(UpperCamelCase )
return spec_out
class DecoderLayer( nn.Module ):
"""simple docstring"""
def __init__( self: int , UpperCamelCase: Any , UpperCamelCase: Optional[int] , UpperCamelCase: str , UpperCamelCase: str , UpperCamelCase: Any , UpperCamelCase: Tuple=1e-6 ):
"""simple docstring"""
super().__init__()
A__ = nn.ModuleList()
# cond self attention: layer 0
self.layer.append(
TaLayerSelfAttentionCond(d_model=UpperCamelCase , d_kv=UpperCamelCase , num_heads=UpperCamelCase , dropout_rate=UpperCamelCase ) )
# cross attention: layer 1
self.layer.append(
TaLayerCrossAttention(
d_model=UpperCamelCase , d_kv=UpperCamelCase , num_heads=UpperCamelCase , dropout_rate=UpperCamelCase , layer_norm_epsilon=UpperCamelCase , ) )
# Film Cond MLP + dropout: last layer
self.layer.append(
TaLayerFFCond(d_model=UpperCamelCase , d_ff=UpperCamelCase , dropout_rate=UpperCamelCase , layer_norm_epsilon=UpperCamelCase ) )
def UpperCamelCase ( self: Optional[int] , UpperCamelCase: Optional[Any] , UpperCamelCase: Union[str, Any]=None , UpperCamelCase: Any=None , UpperCamelCase: Tuple=None , UpperCamelCase: List[Any]=None , UpperCamelCase: List[Any]=None , ):
"""simple docstring"""
A__ = self.layer[0](
UpperCamelCase , conditioning_emb=UpperCamelCase , attention_mask=UpperCamelCase , )
if encoder_hidden_states is not None:
A__ = torch.where(encoder_attention_mask > 0 , 0 , -1e1_0 ).to(
encoder_hidden_states.dtype )
A__ = self.layer[1](
UpperCamelCase , key_value_states=UpperCamelCase , attention_mask=UpperCamelCase , )
# Apply Film Conditional Feed Forward layer
A__ = self.layer[-1](UpperCamelCase , UpperCamelCase )
return (hidden_states,)
class TaLayerSelfAttentionCond( nn.Module ):
"""simple docstring"""
def __init__( self: Union[str, Any] , UpperCamelCase: int , UpperCamelCase: str , UpperCamelCase: Optional[int] , UpperCamelCase: Union[str, Any] ):
"""simple docstring"""
super().__init__()
A__ = TaLayerNorm(UpperCamelCase )
A__ = TaFiLMLayer(in_features=d_model * 4 , out_features=UpperCamelCase )
A__ = Attention(query_dim=UpperCamelCase , heads=UpperCamelCase , dim_head=UpperCamelCase , out_bias=UpperCamelCase , scale_qk=UpperCamelCase )
A__ = nn.Dropout(UpperCamelCase )
def UpperCamelCase ( self: int , UpperCamelCase: Optional[int] , UpperCamelCase: str=None , UpperCamelCase: str=None , ):
"""simple docstring"""
A__ = self.layer_norm(UpperCamelCase )
if conditioning_emb is not None:
A__ = self.FiLMLayer(UpperCamelCase , UpperCamelCase )
# Self-attention block
A__ = self.attention(UpperCamelCase )
A__ = hidden_states + self.dropout(UpperCamelCase )
return hidden_states
class TaLayerCrossAttention( nn.Module ):
"""simple docstring"""
def __init__( self: Any , UpperCamelCase: int , UpperCamelCase: Optional[int] , UpperCamelCase: Union[str, Any] , UpperCamelCase: List[str] , UpperCamelCase: int ):
"""simple docstring"""
super().__init__()
A__ = Attention(query_dim=UpperCamelCase , heads=UpperCamelCase , dim_head=UpperCamelCase , out_bias=UpperCamelCase , scale_qk=UpperCamelCase )
A__ = TaLayerNorm(UpperCamelCase , eps=UpperCamelCase )
A__ = nn.Dropout(UpperCamelCase )
def UpperCamelCase ( self: Optional[Any] , UpperCamelCase: str , UpperCamelCase: List[Any]=None , UpperCamelCase: List[Any]=None , ):
"""simple docstring"""
A__ = self.layer_norm(UpperCamelCase )
A__ = self.attention(
UpperCamelCase , encoder_hidden_states=UpperCamelCase , attention_mask=attention_mask.squeeze(1 ) , )
A__ = hidden_states + self.dropout(UpperCamelCase )
return layer_output
class TaLayerFFCond( nn.Module ):
"""simple docstring"""
def __init__( self: int , UpperCamelCase: Optional[Any] , UpperCamelCase: Tuple , UpperCamelCase: Any , UpperCamelCase: List[str] ):
"""simple docstring"""
super().__init__()
A__ = TaDenseGatedActDense(d_model=UpperCamelCase , d_ff=UpperCamelCase , dropout_rate=UpperCamelCase )
A__ = TaFiLMLayer(in_features=d_model * 4 , out_features=UpperCamelCase )
A__ = TaLayerNorm(UpperCamelCase , eps=UpperCamelCase )
A__ = nn.Dropout(UpperCamelCase )
def UpperCamelCase ( self: Dict , UpperCamelCase: Tuple , UpperCamelCase: Optional[Any]=None ):
"""simple docstring"""
A__ = self.layer_norm(UpperCamelCase )
if conditioning_emb is not None:
A__ = self.film(UpperCamelCase , UpperCamelCase )
A__ = self.DenseReluDense(UpperCamelCase )
A__ = hidden_states + self.dropout(UpperCamelCase )
return hidden_states
class TaDenseGatedActDense( nn.Module ):
"""simple docstring"""
def __init__( self: str , UpperCamelCase: Dict , UpperCamelCase: int , UpperCamelCase: Tuple ):
"""simple docstring"""
super().__init__()
A__ = nn.Linear(UpperCamelCase , UpperCamelCase , bias=UpperCamelCase )
A__ = nn.Linear(UpperCamelCase , UpperCamelCase , bias=UpperCamelCase )
A__ = nn.Linear(UpperCamelCase , UpperCamelCase , bias=UpperCamelCase )
A__ = nn.Dropout(UpperCamelCase )
A__ = NewGELUActivation()
def UpperCamelCase ( self: Any , UpperCamelCase: Any ):
"""simple docstring"""
A__ = self.act(self.wi_a(UpperCamelCase ) )
A__ = self.wi_a(UpperCamelCase )
A__ = hidden_gelu * hidden_linear
A__ = self.dropout(UpperCamelCase )
A__ = self.wo(UpperCamelCase )
return hidden_states
class TaLayerNorm( nn.Module ):
"""simple docstring"""
def __init__( self: Any , UpperCamelCase: Optional[int] , UpperCamelCase: Optional[Any]=1e-6 ):
"""simple docstring"""
super().__init__()
A__ = nn.Parameter(torch.ones(UpperCamelCase ) )
A__ = eps
def UpperCamelCase ( self: Any , UpperCamelCase: Optional[int] ):
"""simple docstring"""
A__ = hidden_states.to(torch.floataa ).pow(2 ).mean(-1 , keepdim=UpperCamelCase )
A__ = hidden_states * torch.rsqrt(variance + self.variance_epsilon )
# convert into half-precision if necessary
if self.weight.dtype in [torch.floataa, torch.bfloataa]:
A__ = hidden_states.to(self.weight.dtype )
return self.weight * hidden_states
class NewGELUActivation( nn.Module ):
"""simple docstring"""
def UpperCamelCase ( self: Optional[Any] , UpperCamelCase: torch.Tensor ):
"""simple docstring"""
return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi ) * (input + 0.044_715 * torch.pow(UpperCamelCase , 3.0 )) ))
class TaFiLMLayer( nn.Module ):
"""simple docstring"""
    def __init__(self, in_features, out_features):
        """simple docstring"""
        super().__init__()
        self.scale_bias = nn.Linear(in_features, out_features * 2, bias=False)
    def forward(self, x, conditioning_emb):
        """simple docstring"""
        emb = self.scale_bias(conditioning_emb)
        scale, shift = torch.chunk(emb, 2, -1)
        x = x * (1 + scale) + shift
        return x
| 335 |
"""simple docstring"""
import pytest
from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs
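# These tests pin down how shard ranges and generator kwargs are split across parallel jobs.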
@pytest.mark.parametrize(
"""kwargs, expected""" , [
({"""num_shards""": 0, """max_num_jobs""": 1}, []),
({"""num_shards""": 10, """max_num_jobs""": 1}, [range(10 )]),
({"""num_shards""": 10, """max_num_jobs""": 10}, [range(UpperCAmelCase_ , i + 1 ) for i in range(10 )]),
({"""num_shards""": 1, """max_num_jobs""": 10}, [range(1 )]),
({"""num_shards""": 10, """max_num_jobs""": 3}, [range(0 , 4 ), range(4 , 7 ), range(7 , 10 )]),
({"""num_shards""": 3, """max_num_jobs""": 10}, [range(0 , 1 ), range(1 , 2 ), range(2 , 3 )]),
] , )
def test_distribute_shards(kwargs, expected):
    out = _distribute_shards(**kwargs)
    assert out == expected
@pytest.mark.parametrize(
"""gen_kwargs, max_num_jobs, expected""" , [
({"""foo""": 0}, 10, [{"""foo""": 0}]),
({"""shards""": [0, 1, 2, 3]}, 1, [{"""shards""": [0, 1, 2, 3]}]),
({"""shards""": [0, 1, 2, 3]}, 4, [{"""shards""": [0]}, {"""shards""": [1]}, {"""shards""": [2]}, {"""shards""": [3]}]),
({"""shards""": [0, 1]}, 4, [{"""shards""": [0]}, {"""shards""": [1]}]),
({"""shards""": [0, 1, 2, 3]}, 2, [{"""shards""": [0, 1]}, {"""shards""": [2, 3]}]),
] , )
def test_split_gen_kwargs(gen_kwargs, max_num_jobs, expected):
    out = _split_gen_kwargs(gen_kwargs, max_num_jobs)
    assert out == expected
@pytest.mark.parametrize(
"""gen_kwargs, expected""" , [
({"""foo""": 0}, 1),
({"""shards""": [0]}, 1),
({"""shards""": [0, 1, 2, 3]}, 4),
({"""shards""": [0, 1, 2, 3], """foo""": 0}, 4),
({"""shards""": [0, 1, 2, 3], """other""": (0, 1)}, 4),
({"""shards""": [0, 1, 2, 3], """shards2""": [0, 1]}, RuntimeError),
] , )
def test_number_of_shards_in_gen_kwargs(gen_kwargs, expected):
    if expected is RuntimeError:
        with pytest.raises(expected):
            _number_of_shards_in_gen_kwargs(gen_kwargs)
    else:
        out = _number_of_shards_in_gen_kwargs(gen_kwargs)
        assert out == expected
| 335 | 1 |
"""simple docstring"""
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_va import MobileNetVaConfig
__A = logging.get_logger(__name__)
# General docstring
__A = """MobileNetV1Config"""
# Base docstring
__A = """google/mobilenet_v1_1.0_224"""
__A = [1, 1_0_2_4, 7, 7]
# Image classification docstring
__A = """google/mobilenet_v1_1.0_224"""
__A = """tabby, tabby cat"""
__A = [
"""google/mobilenet_v1_1.0_224""",
"""google/mobilenet_v1_0.75_192""",
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def _build_tf_to_pytorch_map(model, config, tf_weights=None):
lowercase__: str = {}
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
lowercase__: Dict = model.mobilenet_va
else:
lowercase__: Any = model
lowercase__: Any = 'MobilenetV1/Conv2d_0/'
lowercase__: List[Any] = backbone.conv_stem.convolution.weight
lowercase__: int = backbone.conv_stem.normalization.bias
lowercase__: List[Any] = backbone.conv_stem.normalization.weight
lowercase__: Tuple = backbone.conv_stem.normalization.running_mean
lowercase__: Tuple = backbone.conv_stem.normalization.running_var
for i in range(1_3 ):
lowercase__: List[str] = i + 1
lowercase__: int = i * 2
lowercase__: Optional[int] = backbone.layer[pt_index]
lowercase__: List[Any] = F"""MobilenetV1/Conv2d_{tf_index}_depthwise/"""
lowercase__: Optional[int] = pointer.convolution.weight
lowercase__: Any = pointer.normalization.bias
lowercase__: Union[str, Any] = pointer.normalization.weight
lowercase__: Dict = pointer.normalization.running_mean
lowercase__: Optional[Any] = pointer.normalization.running_var
lowercase__: Union[str, Any] = backbone.layer[pt_index + 1]
lowercase__: Tuple = F"""MobilenetV1/Conv2d_{tf_index}_pointwise/"""
lowercase__: Any = pointer.convolution.weight
lowercase__: List[Any] = pointer.normalization.bias
lowercase__: int = pointer.normalization.weight
lowercase__: str = pointer.normalization.running_mean
lowercase__: Optional[int] = pointer.normalization.running_var
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
lowercase__: List[str] = 'MobilenetV1/Logits/Conv2d_1c_1x1/'
lowercase__: Any = model.classifier.weight
lowercase__: int = model.classifier.bias
return tf_to_pt_map
def load_tf_weights_in_mobilenet_va(model, config, tf_checkpoint_path):
try:
import numpy as np
import tensorflow as tf
except ImportError:
logger.error(
'''Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see '''
'''https://www.tensorflow.org/install/ for installation instructions.''' )
raise
# Load weights from TF model
lowercase__: List[str] = tf.train.list_variables(_UpperCAmelCase )
lowercase__: Tuple = {}
for name, shape in init_vars:
logger.info(F"""Loading TF weight {name} with shape {shape}""" )
lowercase__: Optional[int] = tf.train.load_variable(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: Optional[Any] = array
# Build TF to PyTorch weights loading map
lowercase__: int = _build_tf_to_pytorch_map(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
for name, pointer in tf_to_pt_map.items():
logger.info(F"""Importing {name}""" )
if name not in tf_weights:
logger.info(F"""{name} not in tf pre-trained weights, skipping""" )
continue
lowercase__: Optional[Any] = tf_weights[name]
if "depthwise_weights" in name:
logger.info('''Transposing depthwise''' )
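            # TF stores depthwise kernels as (H, W, in_channels, depth_multiplier);
            # permute to channels-first for PyTorch, hence (2, 3, 0, 1)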
lowercase__: Any = np.transpose(_UpperCAmelCase , (2, 3, 0, 1) )
elif "weights" in name:
logger.info('''Transposing''' )
if len(pointer.shape ) == 2: # copying into linear layer
lowercase__: Optional[int] = array.squeeze().transpose()
else:
lowercase__: Tuple = np.transpose(_UpperCAmelCase , (3, 2, 0, 1) )
if pointer.shape != array.shape:
raise ValueError(F"""Pointer shape {pointer.shape} and array shape {array.shape} mismatched""" )
logger.info(F"""Initialize PyTorch weight {name} {array.shape}""" )
lowercase__: List[str] = torch.from_numpy(_UpperCAmelCase )
tf_weights.pop(_UpperCAmelCase , _UpperCAmelCase )
tf_weights.pop(name + '''/RMSProp''' , _UpperCAmelCase )
tf_weights.pop(name + '''/RMSProp_1''' , _UpperCAmelCase )
tf_weights.pop(name + '''/ExponentialMovingAverage''' , _UpperCAmelCase )
logger.info(F"""Weights not copied to PyTorch model: {', '.join(tf_weights.keys() )}""" )
return model
def apply_tf_padding(features, conv_layer) -> torch.Tensor:
lowercase__: Dict = features.shape[-2:]
lowercase__: str = conv_layer.stride
lowercase__: Any = conv_layer.kernel_size
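    # reproduce TensorFlow "SAME" padding: pad just enough so that
    # output_size == ceil(input_size / stride) along each spatial dimension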
if in_height % stride_height == 0:
lowercase__: str = max(kernel_height - stride_height , 0 )
else:
lowercase__: Dict = max(kernel_height - (in_height % stride_height) , 0 )
if in_width % stride_width == 0:
lowercase__: int = max(kernel_width - stride_width , 0 )
else:
lowercase__: Tuple = max(kernel_width - (in_width % stride_width) , 0 )
lowercase__: Any = pad_along_width // 2
lowercase__: Optional[Any] = pad_along_width - pad_left
lowercase__: Any = pad_along_height // 2
lowercase__: Optional[int] = pad_along_height - pad_top
lowercase__: str = (pad_left, pad_right, pad_top, pad_bottom)
return nn.functional.pad(_UpperCAmelCase , _UpperCAmelCase , '''constant''' , 0.0 )
class MobileNetVaConvLayer(nn.Module ):
"""simple docstring"""
def __init__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = 1 , _UpperCAmelCase = 1 , _UpperCAmelCase = False , _UpperCAmelCase = True , _UpperCAmelCase = True , ):
super().__init__()
lowercase__: Dict = config
if in_channels % groups != 0:
raise ValueError(F"""Input channels ({in_channels}) are not divisible by {groups} groups.""" )
if out_channels % groups != 0:
raise ValueError(F"""Output channels ({out_channels}) are not divisible by {groups} groups.""" )
lowercase__: List[str] = 0 if config.tf_padding else int((kernel_size - 1) / 2 )
lowercase__: Any = nn.Convad(
in_channels=_UpperCAmelCase , out_channels=_UpperCAmelCase , kernel_size=_UpperCAmelCase , stride=_UpperCAmelCase , padding=_UpperCAmelCase , groups=_UpperCAmelCase , bias=_UpperCAmelCase , padding_mode='''zeros''' , )
if use_normalization:
lowercase__: Optional[int] = nn.BatchNormad(
num_features=_UpperCAmelCase , eps=config.layer_norm_eps , momentum=0.9_997 , affine=_UpperCAmelCase , track_running_stats=_UpperCAmelCase , )
else:
lowercase__: Tuple = None
if use_activation:
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
lowercase__: Tuple = ACTaFN[use_activation]
elif isinstance(config.hidden_act , _UpperCAmelCase ):
lowercase__: Any = ACTaFN[config.hidden_act]
else:
lowercase__: Dict = config.hidden_act
else:
lowercase__: Optional[int] = None
def _snake_case ( self , _UpperCAmelCase ):
if self.config.tf_padding:
lowercase__: Dict = apply_tf_padding(_UpperCAmelCase , self.convolution )
lowercase__: Union[str, Any] = self.convolution(_UpperCAmelCase )
if self.normalization is not None:
lowercase__: Union[str, Any] = self.normalization(_UpperCAmelCase )
if self.activation is not None:
lowercase__: List[str] = self.activation(_UpperCAmelCase )
return features
class MobileNetVaPreTrainedModel(PreTrainedModel ):
    """simple docstring"""
    config_class = MobileNetVaConfig
    load_tf_weights = load_tf_weights_in_mobilenet_va
    base_model_prefix = "mobilenet_v1"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False
def _snake_case ( self , _UpperCAmelCase ):
if isinstance(_UpperCAmelCase , (nn.Linear, nn.Convad) ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(_UpperCAmelCase , nn.BatchNormad ):
module.bias.data.zero_()
module.weight.data.fill_(1.0 )
__A = R"""
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
__A = R"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`MobileNetV1ImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
"The bare MobileNetV1 model outputting raw hidden-states without any specific head on top." ,__UpperCamelCase ,)
class MobileNetVaModel(MobileNetVaPreTrainedModel ):
"""simple docstring"""
def __init__( self , _UpperCAmelCase , _UpperCAmelCase = True ):
super().__init__(_UpperCAmelCase )
lowercase__: Optional[Any] = config
lowercase__: Dict = 32
lowercase__: List[str] = max(int(depth * config.depth_multiplier ) , config.min_depth )
lowercase__: str = MobileNetVaConvLayer(
_UpperCAmelCase , in_channels=config.num_channels , out_channels=_UpperCAmelCase , kernel_size=3 , stride=2 , )
lowercase__: List[str] = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]
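        # 13 depthwise-separable blocks: each is a depthwise 3x3 conv (groups = channels)
        # followed by a pointwise 1x1 conv; the width doubles on the first block and
        # whenever the stride is 2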
lowercase__: Any = nn.ModuleList()
for i in range(13 ):
lowercase__: Tuple = out_channels
if strides[i] == 2 or i == 0:
depth *= 2
lowercase__: List[Any] = max(int(depth * config.depth_multiplier ) , config.min_depth )
self.layer.append(
MobileNetVaConvLayer(
_UpperCAmelCase , in_channels=_UpperCAmelCase , out_channels=_UpperCAmelCase , kernel_size=3 , stride=strides[i] , groups=_UpperCAmelCase , ) )
self.layer.append(
MobileNetVaConvLayer(
_UpperCAmelCase , in_channels=_UpperCAmelCase , out_channels=_UpperCAmelCase , kernel_size=1 , ) )
lowercase__: Optional[int] = nn.AdaptiveAvgPoolad((1, 1) ) if add_pooling_layer else None
# Initialize weights and apply final processing
self.post_init()
def _snake_case ( self , _UpperCAmelCase ):
raise NotImplementedError
@add_start_docstrings_to_model_forward(_UpperCAmelCase )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=_UpperCAmelCase , config_class=_CONFIG_FOR_DOC , modality='''vision''' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def _snake_case ( self , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , ):
lowercase__: Optional[Any] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowercase__: int = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError('''You have to specify pixel_values''' )
lowercase__: Tuple = self.conv_stem(_UpperCAmelCase )
lowercase__: Optional[Any] = () if output_hidden_states else None
for i, layer_module in enumerate(self.layer ):
lowercase__: Any = layer_module(_UpperCAmelCase )
if output_hidden_states:
lowercase__: Union[str, Any] = all_hidden_states + (hidden_states,)
lowercase__: List[Any] = hidden_states
if self.pooler is not None:
lowercase__: Tuple = torch.flatten(self.pooler(_UpperCAmelCase ) , start_dim=1 )
else:
lowercase__: Any = None
if not return_dict:
return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None )
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=_UpperCAmelCase , pooler_output=_UpperCAmelCase , hidden_states=_UpperCAmelCase , )
@add_start_docstrings(
"\n MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n " ,__UpperCamelCase ,)
class MobileNetVaForImageClassification(MobileNetVaPreTrainedModel ):
"""simple docstring"""
def __init__( self , _UpperCAmelCase ):
super().__init__(_UpperCAmelCase )
lowercase__: List[str] = config.num_labels
lowercase__: Dict = MobileNetVaModel(_UpperCAmelCase )
lowercase__: Optional[int] = self.mobilenet_va.layer[-1].convolution.out_channels
# Classifier head
lowercase__: str = nn.Dropout(config.classifier_dropout_prob , inplace=_UpperCAmelCase )
lowercase__: str = nn.Linear(_UpperCAmelCase , config.num_labels ) if config.num_labels > 0 else nn.Identity()
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(_UpperCAmelCase )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=_UpperCAmelCase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def _snake_case ( self , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , ):
lowercase__: List[Any] = return_dict if return_dict is not None else self.config.use_return_dict
lowercase__: List[str] = self.mobilenet_va(_UpperCAmelCase , output_hidden_states=_UpperCAmelCase , return_dict=_UpperCAmelCase )
lowercase__: Optional[Any] = outputs.pooler_output if return_dict else outputs[1]
lowercase__: List[Any] = self.classifier(self.dropout(_UpperCAmelCase ) )
lowercase__: Tuple = None
if labels is not None:
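            # infer the problem type from the label count and dtype when it is not configured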
if self.config.problem_type is None:
if self.num_labels == 1:
lowercase__: Dict = 'regression'
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
lowercase__: Tuple = 'single_label_classification'
else:
lowercase__: Dict = 'multi_label_classification'
if self.config.problem_type == "regression":
lowercase__: Union[str, Any] = MSELoss()
if self.num_labels == 1:
lowercase__: int = loss_fct(logits.squeeze() , labels.squeeze() )
else:
lowercase__: Dict = loss_fct(_UpperCAmelCase , _UpperCAmelCase )
elif self.config.problem_type == "single_label_classification":
lowercase__: Optional[int] = CrossEntropyLoss()
lowercase__: List[str] = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
lowercase__: Union[str, Any] = BCEWithLogitsLoss()
lowercase__: int = loss_fct(_UpperCAmelCase , _UpperCAmelCase )
if not return_dict:
lowercase__: Any = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(
loss=_UpperCAmelCase , logits=_UpperCAmelCase , hidden_states=outputs.hidden_states , )
| 350 |
"""simple docstring"""
import warnings
from diffusers import StableDiffusionImgaImgPipeline # noqa F401
warnings.warn(
"The `image_to_image.py` script is outdated. Please use directly `from diffusers import"
" StableDiffusionImg2ImgPipeline` instead."
)
| 2 | 0 |
from manim import *
class __A ( Scene ):
"""simple docstring"""
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : Optional[Any] =Rectangle(height=0.5 , width=0.5 )
__UpperCamelCase : Dict =Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
__UpperCamelCase : Union[str, Any] =[mem.copy() for i in range(6 )]
__UpperCamelCase : List[str] =[mem.copy() for i in range(6 )]
__UpperCamelCase : int =VGroup(*lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0 )
__UpperCamelCase : int =VGroup(*lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0 )
__UpperCamelCase : List[Any] =VGroup(lowerCamelCase__ , lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0 )
__UpperCamelCase : str =Text('CPU' , font_size=24 )
__UpperCamelCase : Dict =Group(lowerCamelCase__ , lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0.5 , aligned_edge=lowerCamelCase__ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(lowerCamelCase__ )
__UpperCamelCase : Union[str, Any] =[mem.copy() for i in range(4 )]
__UpperCamelCase : List[str] =VGroup(*lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0 )
__UpperCamelCase : str =Text('GPU' , font_size=24 )
__UpperCamelCase : List[str] =Group(lowerCamelCase__ , lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0.5 , aligned_edge=lowerCamelCase__ )
gpu.move_to([-1, -1, 0] )
self.add(lowerCamelCase__ )
__UpperCamelCase : Optional[int] =[mem.copy() for i in range(6 )]
__UpperCamelCase : Dict =VGroup(*lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0 )
__UpperCamelCase : Optional[int] =Text('Model' , font_size=24 )
__UpperCamelCase : Tuple =Group(lowerCamelCase__ , lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0.5 , aligned_edge=lowerCamelCase__ )
model.move_to([3, -1.0, 0] )
self.add(lowerCamelCase__ )
__UpperCamelCase : List[Any] =[]
for i, rect in enumerate(lowerCamelCase__ ):
rect.set_stroke(lowerCamelCase__ )
# target = fill.copy().set_fill(YELLOW, opacity=0.7)
# target.move_to(rect)
# self.add(target)
__UpperCamelCase : List[Any] =Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(lowerCamelCase__ , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=lowerCamelCase__ )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(cpu_targs[0] , direction=lowerCamelCase__ , buff=0.0 )
else:
cpu_target.next_to(cpu_targs[i - 1] , direction=lowerCamelCase__ , buff=0.0 )
self.add(lowerCamelCase__ )
cpu_targs.append(lowerCamelCase__ )
__UpperCamelCase : Tuple =[mem.copy() for i in range(6 )]
__UpperCamelCase : List[str] =VGroup(*lowerCamelCase__ ).arrange(lowerCamelCase__ , buff=0 )
__UpperCamelCase : List[str] =Text('Loaded Checkpoint' , font_size=24 )
__UpperCamelCase : Union[str, Any] =Group(lowerCamelCase__ , lowerCamelCase__ ).arrange(lowerCamelCase__ , aligned_edge=lowerCamelCase__ , buff=0.4 )
checkpoint.move_to([3, 0.5, 0] )
__UpperCamelCase : List[Any] =Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
__UpperCamelCase : List[str] =MarkupText(
f'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(lowerCamelCase__ , lowerCamelCase__ )
__UpperCamelCase : Tuple =MarkupText(
f'<span fgcolor=\'{BLUE}\'>●</span> Checkpoint' , font_size=18 , )
blue_text.next_to(lowerCamelCase__ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
__UpperCamelCase : int =MarkupText(
f'Next, a <i><span fgcolor="{BLUE}">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor="{BLUE}">single shard</span>.' , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(lowerCamelCase__ ) , Write(lowerCamelCase__ ) )
self.play(Write(lowerCamelCase__ , run_time=1 ) , Create(lowerCamelCase__ , run_time=1 ) )
__UpperCamelCase : str =[]
__UpperCamelCase : Tuple =[]
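        # first grow the shard highlights in place, then move copies of them onto the CPU slots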
for i, rect in enumerate(lowerCamelCase__ ):
__UpperCamelCase : List[Any] =fill.copy().set_fill(lowerCamelCase__ , opacity=0.7 )
target.move_to(lowerCamelCase__ )
first_animations.append(GrowFromCenter(lowerCamelCase__ , run_time=1 ) )
__UpperCamelCase : Dict =target.copy()
cpu_target.generate_target()
if i < 5:
cpu_target.target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.target.move_to(cpu_right_col_base[i - 5] )
second_animations.append(MoveToTarget(lowerCamelCase__ , run_time=1.5 ) )
self.play(*lowerCamelCase__ )
self.play(*lowerCamelCase__ )
self.wait()
| 71 |
import unittest
from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
_a = get_tests_dir('''fixtures/spiece.model''')
@require_sentencepiece
@require_tokenizers
class __lowerCamelCase ( TokenizerTesterMixin , unittest.TestCase):
"""simple docstring"""
UpperCamelCase__ = AlbertTokenizer
UpperCamelCase__ = AlbertTokenizerFast
UpperCamelCase__ = True
UpperCamelCase__ = True
UpperCamelCase__ = True
def UpperCamelCase ( self ):
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
_UpperCAmelCase = AlbertTokenizer(UpperCAmelCase )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase ( self , UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase = 'this is a test'
_UpperCAmelCase = 'this is a test'
return input_text, output_text
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = '<pad>'
_UpperCAmelCase = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase ) , UpperCAmelCase )
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<pad>' )
self.assertEqual(vocab_keys[1] , '<unk>' )
self.assertEqual(vocab_keys[-1] , '▁eloquent' )
        self.assertEqual(len(vocab_keys ) , 3_0000 )
def UpperCamelCase ( self ):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 3_0000 )
def UpperCamelCase ( self ):
"""simple docstring"""
if not self.test_rust_tokenizer:
return
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = self.get_rust_tokenizer()
_UpperCAmelCase = 'I was born in 92000, and this is falsé.'
_UpperCAmelCase = tokenizer.tokenize(UpperCAmelCase )
_UpperCAmelCase = rust_tokenizer.tokenize(UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
_UpperCAmelCase = tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
_UpperCAmelCase = rust_tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
_UpperCAmelCase = self.get_rust_tokenizer()
_UpperCAmelCase = tokenizer.encode(UpperCAmelCase )
_UpperCAmelCase = rust_tokenizer.encode(UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = AlbertTokenizer(UpperCAmelCase , keep_accents=UpperCAmelCase )
_UpperCAmelCase = tokenizer.tokenize('This is a test' )
self.assertListEqual(UpperCAmelCase , ['▁this', '▁is', '▁a', '▁test'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase ) , [48, 25, 21, 1289] )
_UpperCAmelCase = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
UpperCAmelCase , ['▁i', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', 'é', '.'] )
_UpperCAmelCase = tokenizer.convert_tokens_to_ids(UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , [31, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9] )
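        # token id 1 is <unk>: the accented "é" is out of vocabulary, so it decodes back as "<unk>"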
_UpperCAmelCase = tokenizer.convert_ids_to_tokens(UpperCAmelCase )
self.assertListEqual(
UpperCAmelCase , ['▁i', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '.'] , )
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = AlbertTokenizer(UpperCAmelCase )
_UpperCAmelCase = tokenizer.encode('sequence builders' )
_UpperCAmelCase = tokenizer.encode('multi-sequence build' )
_UpperCAmelCase = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase )
_UpperCAmelCase = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase , UpperCAmelCase )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
@slow
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = {'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'input_ids': [[2, 2_1970, 13, 5, 6092, 167, 28, 7103, 2153, 673, 8, 7028, 1_2051, 18, 17, 7103, 2153, 673, 8, 3515, 1_8684, 8, 4461, 6, 1927, 297, 8, 1_2060, 2607, 18, 13, 5, 4461, 15, 1_0538, 38, 8, 135, 15, 822, 58, 15, 993, 1_0363, 15, 1460, 8005, 4461, 15, 993, 255, 2328, 9, 9, 9, 6, 26, 1112, 816, 3260, 13, 5, 103, 2377, 6, 17, 1112, 816, 2782, 13, 5, 103, 1_0641, 6, 29, 84, 2512, 2430, 782, 1_8684, 2761, 19, 808, 2430, 2556, 17, 855, 1480, 9477, 4091, 128, 1_1712, 15, 7103, 2153, 673, 17, 2_4883, 9990, 9, 3], [2, 1_1502, 25, 1006, 20, 782, 8, 1_1809, 855, 1732, 1_9393, 1_8667, 37, 367, 2_1018, 69, 1854, 34, 1_1860, 1_9124, 27, 156, 225, 17, 193, 4141, 19, 65, 9124, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 2231, 886, 2385, 1_7659, 84, 14, 1_6792, 1952, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=_UpperCAmelCase,
            model_name='albert-base-v2',
            revision='6b6560eaf5ff2e250b00c50f380c5389a9c2d82e',
        )
| 39 | 0 |
from __future__ import annotations
from collections.abc import Callable
Matrix = list[list[float | int]]


def solve(matrix: Matrix, vector: Matrix) -> Matrix:
    """Solve the linear system matrix * x = vector using Gaussian elimination
    with partial pivoting, returning x as a column matrix."""
    size: int = len(matrix)
    # build the augmented matrix [matrix | vector]
    augmented: Matrix = [[0 for _ in range(size + 1)] for _ in range(size)]
    for row in range(size):
        for col in range(size):
            augmented[row][col] = matrix[row][col]
        augmented[row][size] = vector[row][0]

    row = 0
    col = 0
    while row < size and col < size:
        # pivoting: pick the row with the largest absolute entry in this column
        pivot_row = max((abs(augmented[rowa][col]), rowa) for rowa in range(row, size))[
            1
        ]
        if augmented[pivot_row][col] == 0:
            col += 1
            continue
        augmented[row], augmented[pivot_row] = augmented[pivot_row], augmented[row]

        # eliminate the entries below the pivot
        for rowa in range(row + 1, size):
            ratio = augmented[rowa][col] / augmented[row][col]
            augmented[rowa][col] = 0
            for cola in range(col + 1, size + 1):
                augmented[rowa][cola] -= augmented[row][cola] * ratio

        row += 1
        col += 1

    # back substitution
    for col in range(1, size):
        for row in range(col):
            ratio = augmented[row][col] / augmented[col][col]
            for cola in range(col, size + 1):
                augmented[row][cola] -= augmented[col][cola] * ratio

    # round to get rid of numbers like 2.000000000000004
    return [
        [round(augmented[row][size] / augmented[row][row], 10)] for row in range(size)
    ]
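

# Illustrative check (not part of the original file): for the system
# 2x + y = 5, x + 3y = 10 the unique solution is x = 1, y = 3, so
# solve([[2, 1], [1, 3]], [[5], [10]]) should return [[1.0], [3.0]].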
def interpolate(y_list: list[int]) -> Callable[[int], int]:
    """Given y-values at x = 1, 2, ..., n, return the unique interpolating
    polynomial of degree n - 1 as a callable."""
    size: int = len(y_list)
    matrix: Matrix = [[0 for _ in range(size)] for _ in range(size)]
    vector: Matrix = [[0] for _ in range(size)]

    # build the Vandermonde system for the sample points x = 1, ..., size
    for x_val, y_val in enumerate(y_list):
        for col in range(size):
            matrix[x_val][col] = (x_val + 1) ** (size - col - 1)
        vector[x_val][0] = y_val

    coeffs = solve(matrix, vector)

    def interpolated_func(var: int) -> int:
        return sum(
            round(coeffs[x_val][0]) * (var ** (size - x_val - 1))
            for x_val in range(size)
        )

    return interpolated_func
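

# Illustrative check (not part of the original file): interpolating the points
# (1, 1) and (2, 4) yields the line 3x - 2, so interpolate([1, 4])(3) == 7.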
def question_function(variable: int) -> int:
    """The generating polynomial u(n) from Project Euler problem 101."""
    return (
        1
        - variable
        + variable**2
        - variable**3
        + variable**4
        - variable**5
        + variable**6
        - variable**7
        + variable**8
        - variable**9
        + variable**10
    )
def solution(func: Callable[[int], int] = question_function, order: int = 10) -> int:
    """Sum the first incorrect terms (FITs) of the optimum polynomials (OPs)
    fitted to the first 1..order data points of ``func``."""
    data_points = [func(x_val) for x_val in range(1, order + 1)]
    polynomials = [
        interpolate(data_points[:max_coeff]) for max_coeff in range(1, order + 1)
    ]

    ret: int = 0
    for poly in polynomials:
        # walk forward until the fitted polynomial first disagrees with func
        x_val = 1
        while func(x_val) == poly(x_val):
            x_val += 1
        ret += poly(x_val)
    return ret
if __name__ == "__main__":
print(f"{solution() = }")
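    # Sanity note (stated for cross-checking only, not derived from this file):
    # the published answer to Project Euler problem 101 is 37076114526.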
| 357 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DeformableDetrImageProcessor
class DeformableDetrImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """Compute the height and width the image processor is expected to return."""
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class DeformableDetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DeformableDetrImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DeformableDetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = DeformableDetrImageProcessor()
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))

    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = DeformableDetrImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
| 286 | 0 |
'''simple docstring'''
import math
import qiskit
def quantum_full_adder(
    input_1: int = 1, input_2: int = 1, carry_in: int = 1
) -> qiskit.result.counts.Counts:
    """Build and simulate a quantum full adder for the given classical inputs
    (0, 1, or 2, where 2 places the qubit in superposition via a Hadamard)."""
    if (
        isinstance(input_1, str)
        or isinstance(input_2, str)
        or isinstance(carry_in, str)
    ):
        raise TypeError("inputs must be integers.")

    if (input_1 < 0) or (input_2 < 0) or (carry_in < 0):
        raise ValueError("inputs must be positive.")

    if (
        (math.floor(input_1) != input_1)
        or (math.floor(input_2) != input_2)
        or (math.floor(carry_in) != carry_in)
    ):
        raise ValueError("inputs must be exact integers.")

    if (input_1 > 2) or (input_2 > 2) or (carry_in > 2):
        raise ValueError("inputs must be less or equal to 2.")

    # build registers
    qr = qiskit.QuantumRegister(4, "qr")
    cr = qiskit.ClassicalRegister(2, "cr")

    # list the entries
    entry = [input_1, input_2, carry_in]

    quantum_circuit = qiskit.QuantumCircuit(qr, cr)

    for i in range(0, 3):
        if entry[i] == 2:
            quantum_circuit.h(i)  # for hadamard entries
        elif entry[i] == 1:
            quantum_circuit.x(i)  # for 1 entries
        elif entry[i] == 0:
            quantum_circuit.i(i)  # for 0 entries

    # build the circuit
    quantum_circuit.ccx(0, 1, 3)  # ccx = toffoli gate
    quantum_circuit.cx(0, 1)
    quantum_circuit.ccx(1, 2, 3)

    quantum_circuit.cx(1, 2)
    quantum_circuit.cx(0, 1)

    quantum_circuit.measure([2, 3], cr)  # measure the last two qbits

    backend = qiskit.Aer.get_backend("aer_simulator")
    job = qiskit.execute(quantum_circuit, backend, shots=1000)

    return job.result().get_counts(quantum_circuit)
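

# Sanity expectation (illustrative, not from the original file): for the
# classical inputs 1 + 1 + 1 = 3 = 0b11, every shot should measure '11'
# (carry-out qubit and sum qubit both equal to 1).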
if __name__ == "__main__":
print(F"""Total sum count for state is: {quantum_full_adder(1, 1, 1)}""")
| 181 |
'''simple docstring'''
def sum_digits(num: int) -> int:
    """Return the sum of the decimal digits of ``num``."""
    digit_sum = 0
    while num > 0:
        digit_sum += num % 10
        num //= 10
    return digit_sum


def solution(max_n: int = 100) -> int:
    """Sum the digits of the numerator of the ``max_n``-th convergent of the
    continued fraction for e (Project Euler problem 65)."""
    pre_numerator = 1
    cur_numerator = 2

    for i in range(2, max_n + 1):
        temp = pre_numerator
        # the continued-fraction coefficients of e after the leading 2 are
        # 1, 2, 1, 1, 4, 1, 1, 6, ... i.e. 2*i//3 at every third position
        e_cont = 2 * i // 3 if i % 3 == 0 else 1
        pre_numerator = cur_numerator
        cur_numerator = e_cont * pre_numerator + temp

    return sum_digits(cur_numerator)
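

# Illustrative check (not part of the original file): the 10th convergent of e
# has numerator 1457, so solution(10) should return 1 + 4 + 5 + 7 = 17.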
if __name__ == "__main__":
print(F"""{solution() = }""")
| 181 | 1 |
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
"good first issue",
"good second issue",
"good difficult issue",
"enhancement",
"new pipeline/model",
"new scheduler",
"wip",
]
def main():
    """Close, label, or un-label stale issues on the huggingface/diffusers repo."""
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/diffusers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted(issue.get_comments(), key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state="""closed""" )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state="""open""" )
issue.remove_from_labels("""stale""" )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
"""This issue has been automatically marked as stale because it has not had """
"""recent activity. If you think this still needs to be addressed """
"""please comment on this thread.\n\nPlease note that issues that do not follow the """
"""[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) """
"""are likely to be ignored.""" )
issue.add_to_labels("""stale""" )
if __name__ == "__main__":
main()
| 87 |
import argparse
import glob
import logging
import os
from argparse import Namespace
from importlib import import_module
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, TensorDataset
from utils_ner import TokenClassificationTask
logger = logging.getLogger(__name__)
class NERTransformer(BaseTransformer):
    mode = "token-classification"

    def __init__(self, hparams):
        if type(hparams) == dict:
            hparams = Namespace(**hparams)
        module = import_module("tasks")
        try:
            token_classification_task_clazz = getattr(module, hparams.task_type)
            self.token_classification_task = token_classification_task_clazz()
        except AttributeError:
            raise ValueError(
                f"Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. "
                f"Available tasks classes are: {TokenClassificationTask.__subclasses__()}"
            )
        self.labels = self.token_classification_task.get_labels(hparams.labels)
        self.pad_token_label_id = CrossEntropyLoss().ignore_index
        super().__init__(hparams, len(self.labels), self.mode)

    def forward(self, **inputs):
        return self.model(**inputs)

    def training_step(self, batch, batch_num):
        "Compute loss for one training batch."
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
        if self.config.model_type != "distilbert":
            inputs["token_type_ids"] = (
                batch[2] if self.config.model_type in ["bert", "xlnet"] else None
            )  # XLM and RoBERTa don't use token_type_ids
        outputs = self(**inputs)
        loss = outputs[0]
        # tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]}
        return {"loss": loss}
    def prepare_data(self):
        "Called to initialize data. Use the call to construct features."
        args = self.hparams
        for mode in ["train", "dev", "test"]:
            cached_features_file = self._feature_file(mode)
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                logger.info("Loading features from cached file %s", cached_features_file)
                features = torch.load(cached_features_file)
            else:
                logger.info("Creating features from dataset file at %s", args.data_dir)
                examples = self.token_classification_task.read_examples_from_file(args.data_dir, mode)
                features = self.token_classification_task.convert_examples_to_features(
                    examples,
                    self.labels,
                    args.max_seq_length,
                    self.tokenizer,
                    cls_token_at_end=bool(self.config.model_type in ["xlnet"]),
                    cls_token=self.tokenizer.cls_token,
                    cls_token_segment_id=2 if self.config.model_type in ["xlnet"] else 0,
                    sep_token=self.tokenizer.sep_token,
                    sep_token_extra=False,
                    pad_on_left=bool(self.config.model_type in ["xlnet"]),
                    pad_token=self.tokenizer.pad_token_id,
                    pad_token_segment_id=self.tokenizer.pad_token_type_id,
                    pad_token_label_id=self.pad_token_label_id,
                )
                logger.info("Saving features into cached file %s", cached_features_file)
                torch.save(features, cached_features_file)

    def get_dataloader(self, mode, batch_size, shuffle=False):
        "Load datasets. Called after prepare data."
        cached_features_file = self._feature_file(mode)
        logger.info("Loading features from cached file %s", cached_features_file)
        features = torch.load(cached_features_file)
        all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
        all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
        if features[0].token_type_ids is not None:
            all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
        else:
            all_token_type_ids = torch.tensor([0 for f in features], dtype=torch.long)
            # HACK(we will not use this anymore soon)
        all_label_ids = torch.tensor([f.label_ids for f in features], dtype=torch.long)
        return DataLoader(
            TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_label_ids),
            batch_size=batch_size,
        )
    def validation_step(self, batch, batch_nb):
        "Compute validation loss and predictions for one batch."
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
        if self.config.model_type != "distilbert":
            inputs["token_type_ids"] = (
                batch[2] if self.config.model_type in ["bert", "xlnet"] else None
            )  # XLM and RoBERTa don't use token_type_ids
        outputs = self(**inputs)
        tmp_eval_loss, logits = outputs[:2]
        preds = logits.detach().cpu().numpy()
        out_label_ids = inputs["labels"].detach().cpu().numpy()
        return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}

    def _eval_end(self, outputs):
        "Aggregate batch outputs into sequence-level metrics."
        val_loss_mean = torch.stack([x["val_loss"] for x in outputs]).mean()
        preds = np.concatenate([x["pred"] for x in outputs], axis=0)
        preds = np.argmax(preds, axis=2)
        out_label_ids = np.concatenate([x["target"] for x in outputs], axis=0)
        label_map = dict(enumerate(self.labels))
        out_label_list = [[] for _ in range(out_label_ids.shape[0])]
        preds_list = [[] for _ in range(out_label_ids.shape[0])]
        for i in range(out_label_ids.shape[0]):
            for j in range(out_label_ids.shape[1]):
                if out_label_ids[i, j] != self.pad_token_label_id:
                    out_label_list[i].append(label_map[out_label_ids[i][j]])
                    preds_list[i].append(label_map[preds[i][j]])

        results = {
            "val_loss": val_loss_mean,
            "accuracy_score": accuracy_score(out_label_list, preds_list),
            "precision": precision_score(out_label_list, preds_list),
            "recall": recall_score(out_label_list, preds_list),
            "f1": f1_score(out_label_list, preds_list),
        }

        ret = dict(results.items())
        ret["log"] = results
        return ret, preds_list, out_label_list
    def validation_epoch_end(self, outputs):
        # when stable
        ret, preds, targets = self._eval_end(outputs)
        logs = ret["log"]
        return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    def test_epoch_end(self, outputs):
        # updating to test_epoch_end instead of deprecated test_end
        ret, predictions, targets = self._eval_end(outputs)
        # Converting to the dict required by pl
        # https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\
        # pytorch_lightning/trainer/logging.py#L139
        logs = ret["log"]
        # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
        return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    @staticmethod
    def add_model_specific_args(parser, root_dir):
        # Add NER specific options
        BaseTransformer.add_model_specific_args(parser, root_dir)
        parser.add_argument(
            "--task_type", default="NER", type=str, help="Task type to fine tune in training (e.g. NER, POS, etc)"
        )
        parser.add_argument(
            "--max_seq_length",
            default=128,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--labels",
            default="",
            type=str,
            help="Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.",
        )
        parser.add_argument(
            "--gpus",
            default=0,
            type=int,
            help="The number of GPUs allocated for this, it is by default 0 meaning none",
        )
        parser.add_argument(
            "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
        )
        return parser
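

# Example invocation (illustrative only -- generic flags such as --data_dir and
# --output_dir come from add_generic_args/BaseTransformer, and the script
# filename is an assumption):
#   python run_ner_pl.py --task_type NER --labels ./labels.txt \
#       --max_seq_length 128 --gpus 1 --data_dir ./data --output_dir ./out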
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    add_generic_args(parser, os.getcwd())
    parser = NERTransformer.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()
    model = NERTransformer(args)
    trainer = generic_train(model, args)
if args.do_predict:
# See https://github.com/huggingface/transformers/issues/3159
# pl use this default format to create a checkpoint:
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master\
# /pytorch_lightning/callbacks/model_checkpoint.py#L322
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "checkpoint-epoch=*.ckpt"), recursive=True))
        model = model.load_from_checkpoint(checkpoints[-1])
trainer.test(model)
| 87 | 1 |
from __future__ import annotations
def generate_all_permutations(sequence: list[int | str]) -> None:
    """Print every permutation of ``sequence`` via backtracking."""
    create_state_space_tree(sequence, [], 0, [0 for i in range(len(sequence))])


def create_state_space_tree(
    sequence: list[int | str],
    current_sequence: list[int | str],
    index: int,
    index_used: list[int],
) -> None:
    """Recursively build the state-space tree; each root-to-leaf path of
    length len(sequence) is one permutation."""
    if index == len(sequence):
        print(current_sequence)
        return

    for i in range(len(sequence)):
        if not index_used[i]:
            current_sequence.append(sequence[i])
            index_used[i] = True
            create_state_space_tree(sequence, current_sequence, index + 1, index_used)
            current_sequence.pop()
            index_used[i] = False
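

# For a two-element input such as [1, 2], the backtracking above prints
# [1, 2] followed by [2, 1].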
sequence: list[int | str] = [3, 1, 2, 4]
generate_all_permutations(sequence)

sequence_a: list[int | str] = ["A", "B", "C"]
generate_all_permutations(sequence_a)
| 51 |
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def set_param(torch_layer, weight, bias=None):
    # set parameters of one torch layer from trax weights
    assert torch_layer.weight.shape == weight.shape, f"{torch_layer} layer.weight does not match"
    torch_layer.weight = nn.Parameter(weight)
    if bias is not None:
        assert torch_layer.bias.shape == bias.shape, f"{torch_layer} layer.bias does not match"
        torch_layer.bias = nn.Parameter(bias)


def set_layer_weights_in_torch_lsh(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query_key = np.asarray(weights[0])
    np_value = np.asarray(weights[1])
    np_dense = np.asarray(weights[2])

    set_param(
        torch_layer.self_attention.query_key,
        torch.tensor(np_query_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )


def set_layer_weights_in_torch_local(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query = np.asarray(weights[0])
    np_key = np.asarray(weights[1])
    np_value = np.asarray(weights[2])
    np_dense = np.asarray(weights[3])

    set_param(
        torch_layer.self_attention.query,
        torch.tensor(np_query).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.key,
        torch.tensor(np_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )
def set_block_weights_in_torch(weights, torch_block, hidden_size):
    # layernorm 1
    layer_norm_1 = weights[0][0][0]
    layer_norm_1_weight = np.asarray(layer_norm_1[0])
    layer_norm_1_bias = np.asarray(layer_norm_1[1])
    set_param(
        torch_block.attention.layer_norm,
        torch.tensor(layer_norm_1_weight),
        torch.tensor(layer_norm_1_bias),
    )

    # lsh weights + output
    attn_weights = weights[0][1]
    if len(attn_weights) < 4:
        set_layer_weights_in_torch_lsh(attn_weights, torch_block.attention, hidden_size)
    else:
        set_layer_weights_in_torch_local(attn_weights, torch_block.attention, hidden_size)

    # intermediate weighs
    intermediate_weights = weights[2][0][1][2]

    # Chunked Feed Forward
    if len(intermediate_weights) == 4:
        intermediate_weights = intermediate_weights[2]

    # layernorm 2
    layer_norm_2_weight = np.asarray(intermediate_weights[0][0])
    layer_norm_2_bias = np.asarray(intermediate_weights[0][1])
    set_param(
        torch_block.feed_forward.layer_norm,
        torch.tensor(layer_norm_2_weight),
        torch.tensor(layer_norm_2_bias),
    )

    # intermediate dense
    inter_dense_weight = np.asarray(intermediate_weights[1][0])
    inter_dense_bias = np.asarray(intermediate_weights[1][1])
    set_param(
        torch_block.feed_forward.dense.dense,
        torch.tensor(inter_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(inter_dense_bias),
    )

    # intermediate out
    out_dense_weight = np.asarray(intermediate_weights[4][0])
    out_dense_bias = np.asarray(intermediate_weights[4][1])
    set_param(
        torch_block.feed_forward.output.dense,
        torch.tensor(out_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(out_dense_bias),
    )
def set_model_weights_in_torch(weights, torch_model, hidden_size):
    # reformer model
    torch_model_reformer = torch_model.reformer

    # word embeds
    word_embeddings = np.asarray(weights[1])
    set_param(
        torch_model_reformer.embeddings.word_embeddings,
        torch.tensor(word_embeddings),
    )

    if isinstance(weights[3], tuple):
        position_embeddings = torch_model_reformer.embeddings.position_embeddings
        for emb_idx in range(len(position_embeddings.weights)):
            emb_weights = np.asarray(weights[3][emb_idx][0])
            assert (
                position_embeddings.weights[emb_idx].shape == emb_weights.shape
            ), f"{position_embeddings[emb_idx]} emb does not match"
            position_embeddings.weights[emb_idx] = nn.Parameter(torch.tensor(emb_weights))

    trax_layer_weights = weights[5]
    assert len(torch_model_reformer.encoder.layers) * 4 == len(
        trax_layer_weights
    ), "HF and trax model do not have the same number of layers"
    for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers):
        block_weights = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
        set_block_weights_in_torch(block_weights, layer, hidden_size)

    # output layer norm
    layer_norm_out_weight = np.asarray(weights[7][0])
    layer_norm_out_bias = np.asarray(weights[7][1])
    set_param(
        torch_model_reformer.encoder.layer_norm,
        torch.tensor(layer_norm_out_weight),
        torch.tensor(layer_norm_out_bias),
    )

    # output embeddings
    output_embed_weights = np.asarray(weights[9][0])
    output_embed_bias = np.asarray(weights[9][1])
    set_param(
        torch_model.lm_head.decoder,
        torch.tensor(output_embed_weights).transpose(0, 1).contiguous(),
        torch.tensor(output_embed_bias),
    )
def convert_trax_checkpoint_to_pytorch(trax_model_pkl_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = ReformerConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = ReformerModelWithLMHead(config)

    with open(trax_model_pkl_path, "rb") as f:
        model_weights = pickle.load(f)["weights"]

    set_model_weights_in_torch(model_weights, model, config.hidden_size)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--trax_model_pkl_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained Reformer model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
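    # Example invocation (illustrative; paths are placeholders):
    #   python convert_reformer_trax_checkpoint_to_pytorch.py \
    #       --trax_model_pkl_path ./model.pkl --config_file ./config.json \
    #       --pytorch_dump_path ./pytorch_model.bin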
| 51 | 1 |
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, Transformer2DModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class LearnedClassifierFreeSamplingEmbeddings(ModelMixin, ConfigMixin):
    """Utility class for storing learned text embeddings for classifier free sampling."""

    @register_to_config
    def __init__(self, learnable: bool, hidden_size: Optional[int] = None, length: Optional[int] = None):
        super().__init__()

        self.learnable = learnable

        if self.learnable:
            assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
            assert length is not None, "learnable=True requires `length` to be set"

            embeddings = torch.zeros(length, hidden_size)
        else:
            embeddings = None

        self.embeddings = torch.nn.Parameter(embeddings)


class VQDiffusionPipeline(DiffusionPipeline):
    """Pipeline for text-to-image generation using VQ Diffusion."""

    vqvae: VQModel
    text_encoder: CLIPTextModel
    tokenizer: CLIPTokenizer
    transformer: Transformer2DModel
    learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings
    scheduler: VQDiffusionScheduler

    def __init__(
        self,
        vqvae: VQModel,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        transformer: Transformer2DModel,
        scheduler: VQDiffusionScheduler,
        learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings,
    ):
        super().__init__()
        self.register_modules(
            vqvae=vqvae,
            transformer=transformer,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            scheduler=scheduler,
            learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,
        )
    def _encode_prompt(self, prompt, num_images_per_prompt, do_classifier_free_guidance):
        batch_size = len(prompt) if isinstance(prompt, list) else 1

        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        prompt_embeds = self.text_encoder(text_input_ids.to(self.device))[0]

        # NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
        # While CLIP does normalize the pooled output of the text transformer when combining
        # the image and text embeddings, CLIP does not directly normalize the last hidden state.
        #
        # CLIP normalizing the pooled output.
        # https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
        prompt_embeds = prompt_embeds / prompt_embeds.norm(dim=-1, keepdim=True)

        # duplicate text embeddings for each generation per prompt
        prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0)

        if do_classifier_free_guidance:
            if self.learned_classifier_free_sampling_embeddings.learnable:
                negative_prompt_embeds = self.learned_classifier_free_sampling_embeddings.embeddings
                negative_prompt_embeds = negative_prompt_embeds.unsqueeze(0).repeat(batch_size, 1, 1)
            else:
                uncond_tokens = [""] * batch_size

                max_length = text_input_ids.shape[-1]
                uncond_input = self.tokenizer(
                    uncond_tokens,
                    padding="max_length",
                    max_length=max_length,
                    truncation=True,
                    return_tensors="pt",
                )
                negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
                # See comment for normalizing text embeddings
                negative_prompt_embeds = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1, keepdim=True)

                # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
                seq_len = negative_prompt_embeds.shape[1]
                negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
                negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])

        return prompt_embeds
    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        num_inference_steps: int = 100,
        guidance_scale: float = 5.0,
        truncation_rate: float = 1.0,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
    ) -> Union[ImagePipelineOutput, Tuple]:
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        batch_size = batch_size * num_images_per_prompt

        do_classifier_free_guidance = guidance_scale > 1.0

        prompt_embeds = self._encode_prompt(prompt, num_images_per_prompt, do_classifier_free_guidance)

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        # get the initial completely masked latents unless the user supplied it
        latents_shape = (batch_size, self.transformer.num_latent_pixels)
        if latents is None:
            mask_class = self.transformer.num_vector_embeds - 1
            latents = torch.full(latents_shape, mask_class).to(self.device)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
                raise ValueError(
                    "Unexpected latents value(s). All latents be valid embedding indices i.e. in the range 0,"
                    f" {self.transformer.num_vector_embeds - 1} (inclusive)."
                )
            latents = latents.to(self.device)

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)

        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        sample = latents

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the sample if we are doing classifier free guidance
            latent_model_input = torch.cat([sample] * 2) if do_classifier_free_guidance else sample

            # predict the un-noised image
            # model_output == `log_p_x_0`
            model_output = self.transformer(
                latent_model_input, encoder_hidden_states=prompt_embeds, timestep=t
            ).sample

            if do_classifier_free_guidance:
                model_output_uncond, model_output_text = model_output.chunk(2)
                model_output = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
                model_output -= torch.logsumexp(model_output, dim=1, keepdim=True)

            model_output = self.truncate(model_output, truncation_rate)

            # remove `log(0)`'s (`-inf`s)
            model_output = model_output.clamp(-70)

            # compute the previous noisy sample x_t -> x_t-1
            sample = self.scheduler.step(model_output, timestep=t, sample=sample, generator=generator).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, sample)

        embedding_channels = self.vqvae.config.vq_embed_dim
        embeddings_shape = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
        embeddings = self.vqvae.quantize.get_codebook_entry(sample, shape=embeddings_shape)
        image = self.vqvae.decode(embeddings, force_not_quantize=True).sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
    def truncate(self, log_p_x_0: torch.FloatTensor, truncation_rate: float) -> torch.FloatTensor:
        """Zero out (in log space) the lowest-probability classes so that each
        column keeps at most ``truncation_rate`` total cumulative probability."""
        sorted_log_p_x_0, indices = torch.sort(log_p_x_0, 1, descending=True)
        sorted_p_x_0 = torch.exp(sorted_log_p_x_0)
        keep_mask = sorted_p_x_0.cumsum(dim=1) < truncation_rate

        # Ensure that at least the largest probability is not zeroed out
        all_true = torch.full_like(keep_mask[:, 0:1, :], True)
        keep_mask = torch.cat((all_true, keep_mask), dim=1)
        keep_mask = keep_mask[:, :-1, :]

        keep_mask = keep_mask.gather(1, indices.argsort(1))

        rv = log_p_x_0.clone()

        rv[~keep_mask] = -torch.inf  # -inf = log(0)

        return rv
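

# Illustrative usage (assumption -- the checkpoint name follows the public
# diffusers docs, e.g. "microsoft/vq-diffusion-ithq"; it is not pinned by this
# file):
#   pipe = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq")
#   image = pipe("teddy bear playing in the pool").images[0]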
| 367 |
"""simple docstring"""
import numpy as np
from PIL import Image
def maxpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    """
    This function is used to perform maxpooling on the input array of 2D matrix (image)
    """
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix")
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0

    # compute the shape of the output matrix
    maxpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape maxpool_shape
    updated_arr = np.zeros((maxpool_shape, maxpool_shape))

    while i < arr.shape[0]:
        if i + size > arr.shape[0]:
            # if the end of the matrix is reached, break
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the maximum of the pooling matrix
            updated_arr[mat_i][mat_j] = np.max(arr[i : i + size, j : j + size])
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0

    return updated_arr
def avgpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    """
    This function is used to perform avgpooling on the input array of 2D matrix (image)
    """
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix")
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0

    # compute the shape of the output matrix
    avgpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape avgpool_shape
    updated_arr = np.zeros((avgpool_shape, avgpool_shape))

    while i < arr.shape[0]:
        # if the end of the matrix is reached, break
        if i + size > arr.shape[0]:
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the average of the pooling matrix
            updated_arr[mat_i][mat_j] = int(np.average(arr[i : i + size, j : j + size]))
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0

    return updated_arr
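

# Quick illustration (not part of the original file): max-pooling a 4x4 ramp
# with a 2x2 window and stride 2 keeps each block's maximum:
#   maxpooling(np.arange(16).reshape(4, 4), size=2, stride=2)
#   # -> [[ 5.,  7.],
#   #     [13., 15.]]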
# Main Function
if __name__ == "__main__":
from doctest import testmod
testmod(name="avgpooling", verbose=True)
# Loading the image
    image = Image.open("path_to_image")
# Converting the image to numpy array and maxpooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()
# Converting the image to numpy array and averagepooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
| 272 | 0 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-vision-base-ft": (
        "https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"
    ),
}
class Data2VecVisionConfig(PretrainedConfig):
    model_type = "data2vec-vision"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        use_mask_token=False,
        use_absolute_position_embeddings=False,
        use_relative_position_bias=False,
        use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1,
        drop_path_rate=0.1,
        use_mean_pooling=True,
        out_indices=[3, 5, 7, 11],
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class Data2VecVisionOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
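

# Illustrative usage (assumption, mirroring other Hugging Face config classes):
#   config = Data2VecVisionConfig()   # default architecture
#   config.hidden_size                # -> 768, per the defaults above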
| 90 |
from math import pi, sqrt, tan
def surface_area_cube(side_length: float) -> float:
    """Calculate the surface area of a cube."""
    if side_length < 0:
        raise ValueError("surface_area_cube() only accepts non-negative values")
    return 6 * side_length**2


def surface_area_cuboid(length: float, breadth: float, height: float) -> float:
    """Calculate the surface area of a cuboid."""
    if length < 0 or breadth < 0 or height < 0:
        raise ValueError("surface_area_cuboid() only accepts non-negative values")
    return 2 * ((length * breadth) + (breadth * height) + (length * height))


def surface_area_sphere(radius: float) -> float:
    """Calculate the surface area of a sphere."""
    if radius < 0:
        raise ValueError("surface_area_sphere() only accepts non-negative values")
    return 4 * pi * radius**2


def surface_area_hemisphere(radius: float) -> float:
    """Calculate the surface area of a hemisphere (half sphere plus base disc)."""
    if radius < 0:
        raise ValueError("surface_area_hemisphere() only accepts non-negative values")
    return 3 * pi * radius**2


def surface_area_cone(radius: float, height: float) -> float:
    """Calculate the surface area of a cone."""
    if radius < 0 or height < 0:
        raise ValueError("surface_area_cone() only accepts non-negative values")
    return pi * radius * (radius + (height**2 + radius**2) ** 0.5)


def surface_area_conical_frustum(radius_1: float, radius_2: float, height: float) -> float:
    """Calculate the surface area of a conical frustum."""
    if radius_1 < 0 or radius_2 < 0 or height < 0:
        raise ValueError("surface_area_conical_frustum() only accepts non-negative values")
    slant_height = (height**2 + (radius_1 - radius_2) ** 2) ** 0.5
    return pi * ((slant_height * (radius_1 + radius_2)) + radius_1**2 + radius_2**2)


def surface_area_cylinder(radius: float, height: float) -> float:
    """Calculate the surface area of a cylinder."""
    if radius < 0 or height < 0:
        raise ValueError("surface_area_cylinder() only accepts non-negative values")
    return 2 * pi * radius * (height + radius)


def surface_area_torus(torus_radius: float, tube_radius: float) -> float:
    """Calculate the surface area of a torus."""
    if torus_radius < 0 or tube_radius < 0:
        raise ValueError("surface_area_torus() only accepts non-negative values")
    if torus_radius < tube_radius:
        raise ValueError("surface_area_torus() does not support spindle or self intersecting tori")
    return 4 * pow(pi, 2) * torus_radius * tube_radius


def area_rectangle(length: float, width: float) -> float:
    """Calculate the area of a rectangle."""
    if length < 0 or width < 0:
        raise ValueError("area_rectangle() only accepts non-negative values")
    return length * width


def area_square(side_length: float) -> float:
    """Calculate the area of a square."""
    if side_length < 0:
        raise ValueError("area_square() only accepts non-negative values")
    return side_length**2


def area_triangle(base: float, height: float) -> float:
    """Calculate the area of a triangle given its base and height."""
    if base < 0 or height < 0:
        raise ValueError("area_triangle() only accepts non-negative values")
    return (base * height) / 2


def area_triangle_three_sides(side1: float, side2: float, side3: float) -> float:
    """Calculate the area of a triangle from its three sides using Heron's formula."""
    if side1 < 0 or side2 < 0 or side3 < 0:
        raise ValueError("area_triangle_three_sides() only accepts non-negative values")
    elif side1 + side2 < side3 or side1 + side3 < side2 or side2 + side3 < side1:
        raise ValueError("Given three sides do not form a triangle")
    semi_perimeter = (side1 + side2 + side3) / 2
    area = sqrt(
        semi_perimeter
        * (semi_perimeter - side1)
        * (semi_perimeter - side2)
        * (semi_perimeter - side3)
    )
    return area


def area_parallelogram(base: float, height: float) -> float:
    """Calculate the area of a parallelogram."""
    if base < 0 or height < 0:
        raise ValueError("area_parallelogram() only accepts non-negative values")
    return base * height


def area_trapezium(base_1: float, base_2: float, height: float) -> float:
    """Calculate the area of a trapezium."""
    if base_1 < 0 or base_2 < 0 or height < 0:
        raise ValueError("area_trapezium() only accepts non-negative values")
    return 1 / 2 * (base_1 + base_2) * height


def area_circle(radius: float) -> float:
    """Calculate the area of a circle."""
    if radius < 0:
        raise ValueError("area_circle() only accepts non-negative values")
    return pi * radius**2


def area_ellipse(radius_x: float, radius_y: float) -> float:
    """Calculate the area of an ellipse."""
    if radius_x < 0 or radius_y < 0:
        raise ValueError("area_ellipse() only accepts non-negative values")
    return pi * radius_x * radius_y


def area_rhombus(diagonal_1: float, diagonal_2: float) -> float:
    """Calculate the area of a rhombus from its diagonals."""
    if diagonal_1 < 0 or diagonal_2 < 0:
        raise ValueError("area_rhombus() only accepts non-negative values")
    return 1 / 2 * diagonal_1 * diagonal_2


def area_reg_polygon(sides: int, length: float) -> float:
    """Calculate the area of a regular polygon with the given side count and length."""
    if not isinstance(sides, int) or sides < 3:
        raise ValueError(
            "area_reg_polygon() only accepts integers greater than or "
            "equal to three as number of sides"
        )
    elif length < 0:
        raise ValueError(
            "area_reg_polygon() only accepts non-negative values as "
            "length of a side"
        )
    return (sides * length**2) / (4 * tan(pi / sides))
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print("[DEMO] Areas of various geometric shapes: \n")
print(f'''Rectangle: {area_rectangle(10, 20) = }''')
print(f'''Square: {area_square(10) = }''')
print(f'''Triangle: {area_triangle(10, 10) = }''')
print(f'''Triangle: {area_triangle_three_sides(5, 12, 13) = }''')
print(f'''Parallelogram: {area_parallelogram(10, 20) = }''')
print(f'''Rhombus: {area_rhombus(10, 20) = }''')
print(f'''Trapezium: {area_trapezium(10, 20, 30) = }''')
print(f'''Circle: {area_circle(20) = }''')
print(f'''Ellipse: {area_ellipse(10, 20) = }''')
print("\nSurface Areas of various geometric shapes: \n")
print(f'''Cube: {surface_area_cube(20) = }''')
print(f'''Cuboid: {surface_area_cuboid(10, 20, 30) = }''')
print(f'''Sphere: {surface_area_sphere(20) = }''')
print(f'''Hemisphere: {surface_area_hemisphere(20) = }''')
print(f'''Cone: {surface_area_cone(10, 20) = }''')
print(f'''Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }''')
print(f'''Cylinder: {surface_area_cylinder(10, 20) = }''')
print(f'''Torus: {surface_area_torus(20, 10) = }''')
print(f'''Equilateral Triangle: {area_reg_polygon(3, 10) = }''')
print(f'''Square: {area_reg_polygon(4, 10) = }''')
    print(f'''Regular Pentagon: {area_reg_polygon(5, 10) = }''')
| 90 | 1 |
"""simple docstring"""
import copy
import os
import cv2 as cva
import numpy as np
from matplotlib import pyplot as plt
class ConstantStretch :
'''simple docstring'''
    def __init__(self ):
        '''simple docstring'''
        self.img = ''
        self.original_image = ''
        self.last_list = []
        self.rem = 0
        self.L = 2_5_6
        self.sk = 0
        self.k = 0
        self.number_of_rows = 0
        self.number_of_cols = 0
    def stretch (self, input_image ):
        '''simple docstring'''
        self.img = cva.imread(input_image, 0 )
        self.original_image = copy.deepcopy(self.img )
        x , _ , _ = plt.hist(self.img.ravel(), 2_5_6, [0, 2_5_6], label='x' )
        self.k = np.sum(x )
        for i in range(len(x ) ):
            prk = x[i] / self.k
            self.sk += prk
            last = (self.L - 1) * self.sk
            if self.rem != 0:
                self.rem = int(last % last )
            last = int(last + 1 if self.rem >= 0.5 else last )
            self.last_list.append(last )
            self.number_of_rows = int(np.ma.count(self.img ) / self.img[1].size )
            self.number_of_cols = self.img[1].size
        for i in range(self.number_of_cols ):
            for j in range(self.number_of_rows ):
                num = self.img[j][i]
                if num != self.last_list[num]:
                    self.img[j][i] = self.last_list[num]
        cva.imwrite('output_data/output.jpg', self.img )
    def plot_histogram (self ):
'''simple docstring'''
plt.hist(self.img.ravel(), 2_5_6, [0, 2_5_6] )
    def show_image (self ):
'''simple docstring'''
cva.imshow('Output-Image', self.img )
cva.imshow('Input-Image', self.original_image )
cva.waitKey(5_0_0_0 )
cva.destroyAllWindows()
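# Note: stretch() above is global histogram equalization: it accumulates the
# normalized histogram into a CDF and remaps every pixel through the resulting
# lookup table. A minimal NumPy-only sketch of the same idea (assuming an
# 8-bit grayscale array `img`):
#     cdf = np.cumsum(np.bincount(img.ravel(), minlength=256)) / img.size
#     equalized = (255 * cdf)[img].astype(np.uint8)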
if __name__ == "__main__":
A_ : List[Any] = os.path.join(os.path.basename(__file__), "image_data/input.jpg")
A_ : str = ConstantStretch()
stretcher.stretch(file_path)
stretcher.plot_histogram()
stretcher.show_image()
| 316 |
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
)
from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class TFDeiTModelTester :
'''simple docstring'''
    def __init__(self, parent, batch_size=1_3, image_size=3_0, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=3_2, num_hidden_layers=2, num_attention_heads=4, intermediate_size=3_7, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=1_0, initializer_range=0.02, num_labels=3, scope=None, encoder_stride=2, ):
'''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride
        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 2
    def prepare_config_and_inputs (self ):
        '''simple docstring'''
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config (self ):
'''simple docstring'''
return DeiTConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride, )
    def create_and_check_model (self, config, pixel_values, labels ):
        '''simple docstring'''
        model = TFDeiTModel(config=config )
        result = model(pixel_values )
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_masked_image_modeling (self, config, pixel_values, labels ):
        '''simple docstring'''
        model = TFDeiTForMaskedImageModeling(config=config )
        result = model(pixel_values )
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size) )
        # test greyscale images
        config.num_channels = 1
        model = TFDeiTForMaskedImageModeling(config )
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values )
self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size) )
    def create_and_check_for_image_classification (self, config, pixel_values, labels ):
        '''simple docstring'''
        config.num_labels = self.type_sequence_label_size
        model = TFDeiTForImageClassification(config )
        result = model(pixel_values, labels=labels )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        config.num_channels = 1
        model = TFDeiTForImageClassification(config )
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values, labels=labels )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) )
    def prepare_config_and_inputs_for_common (self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_tf
class TFDeiTModelTest ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (
(
TFDeiTModel,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
'feature-extraction': TFDeiTModel,
'image-classification': (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
}
if is_tf_available()
else {}
)
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    def setUp (self ):
        '''simple docstring'''
        self.model_tester = TFDeiTModelTester(self )
        self.config_tester = ConfigTester(self, config_class=DeiTConfig, has_text_modality=False, hidden_size=3_7 )
    def test_config (self ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='DeiT does not use inputs_embeds' )
    def test_inputs_embeds (self ):
'''simple docstring'''
pass
    def test_model_common_attributes (self ):
        '''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, tf.keras.layers.Dense ) )
    def test_forward_signature (self ):
        '''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.call )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names )
    def test_model (self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_masked_image_modeling (self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs )
    def test_for_image_classification (self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
    def _prepare_for_class (self, inputs_dict, model_class, return_labels=False ):
        '''simple docstring'''
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels )
if return_labels:
if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call ).parameters:
del inputs_dict["labels"]
return inputs_dict
@slow
    def test_model_from_pretrained (self ):
        '''simple docstring'''
        for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDeiTModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img ( ):
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
@require_tf
@require_vision
class TFDeiTModelIntegrationTest ( unittest.TestCase ):
'''simple docstring'''
@cached_property
    def default_image_processor (self ):
'''simple docstring'''
return (
DeiTImageProcessor.from_pretrained('facebook/deit-base-distilled-patch16-224' )
if is_vision_available()
else None
)
@slow
    def test_inference_image_classification_head (self ):
        '''simple docstring'''
        model = TFDeiTForImageClassificationWithTeacher.from_pretrained('facebook/deit-base-distilled-patch16-224' )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='tf' )
        # forward pass
        outputs = model(**inputs )
        # verify the logits
        expected_shape = tf.TensorShape((1, 1_0_0_0) )
        self.assertEqual(outputs.logits.shape, expected_shape )
        expected_slice = tf.constant([-1.0_266, 0.1_912, -1.2_861] )
        self.assertTrue(np.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4 ) )
| 316 | 1 |
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
'vocab_file': 'vocab.json',
'tokenizer_config_file': 'tokenizer_config.json',
'merges_file': 'merges.txt',
}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/s2t-wav2vec2-large-en-de': (
'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json'
),
},
'tokenizer_config_file': {
'facebook/s2t-wav2vec2-large-en-de': (
'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json'
),
},
'merges_file': {
'facebook/s2t-wav2vec2-large-en-de': (
'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt'
),
},
}
A : int = '</w>'
A : Dict = '@@ '
def get_pairs ( word ) -> Any:
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
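# Example (hypothetical input): get_pairs(("h", "e", "y</w>")) returns
# {("h", "e"), ("e", "y</w>")}, the adjacent-symbol pairs ranked during BPE.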
# Speech2Text2 has no max input length
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'facebook/s2t-wav2vec2-large-en-de': 1_0_2_4}
class __A( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['''input_ids''', '''attention_mask''']
def __init__( self , _snake_case , _snake_case="<s>" , _snake_case="<pad>" , _snake_case="</s>" , _snake_case="<unk>" , _snake_case=False , _snake_case=None , **_snake_case , ) -> List[Any]:
'''simple docstring'''
super().__init__(
unk_token=_snake_case , bos_token=_snake_case , eos_token=_snake_case , pad_token=_snake_case , do_lower_case=_snake_case , **_snake_case , )
__a = do_lower_case
with open(_snake_case , encoding='''utf-8''' ) as vocab_handle:
__a = json.load(_snake_case )
__a = {v: k for k, v in self.encoder.items()}
if merges_file is None:
logger.info(F"""No merges files provided. {self.__class__.__name__} can only be used for decoding.""" )
__a = None
__a = None
else:
with open(_snake_case , encoding='''utf-8''' ) as merges_handle:
__a = merges_handle.read().split('''\n''' )[:-1]
__a = [tuple(merge.split()[:2] ) for merge in merges]
__a = dict(zip(_snake_case , range(len(_snake_case ) ) ) )
__a = {}
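    # Note: without a merges file the tokenizer is decode-only -- bpe_ranks stays
    # None, so _tokenize() below raises while id/token lookups still work.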
@property
    def vocab_size( self ) -> int:
'''simple docstring'''
return len(self.decoder )
    def get_vocab( self ) -> Dict:
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder )
    def bpe( self , token ) -> Any:
        '''simple docstring'''
        word = tuple(token[:-1] ) + (token[-1] + BPE_TOKEN_END_OF_WORD,)
        if token in self.cache:
            return self.cache[token]
        pairs = get_pairs(word )
        if not pairs:
            return token
        while True:
            bigram = min(pairs , key=lambda pair : self.bpe_ranks.get(pair , float('''inf''' ) ) )
            if bigram not in self.bpe_ranks:
                break
            first , second = bigram
            new_word = []
            i = 0
            while i < len(word ):
                try:
                    j = word.index(first , i )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    i = j
                if word[i] == first and i < len(word ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            new_word = tuple(new_word )
            word = new_word
            if len(word ) == 1:
                break
            else:
                pairs = get_pairs(word )
        word = ''' '''.join(word )
        if word == "\n " + BPE_TOKEN_END_OF_WORD:
            word = '''\n''' + BPE_TOKEN_END_OF_WORD
        if word.endswith(BPE_TOKEN_END_OF_WORD ):
            word = word.replace(BPE_TOKEN_END_OF_WORD , '''''' )
        word = word.replace(''' ''' , BPE_TOKEN_MERGES )
        self.cache[token] = word
        return word
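    # The loop above is standard byte-pair encoding: the adjacent symbol pair
    # with the lowest merge rank is fused repeatedly until none remains; "</w>"
    # marks the end of a word and "@@ " re-marks intra-word splits so that
    # convert_tokens_to_string() can undo them later.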
    def _tokenize( self , text ) -> Optional[Any]:
        '''simple docstring'''
        if self.bpe_ranks is None:
            raise ValueError(
                '''This tokenizer was instantiated without a `merges.txt` file, so'''
                ''' that it can only be used for decoding, not for encoding.'''
                '''Make sure to provide `merges.txt` file at instantiation to enable '''
                '''encoding.''' )
        if self.do_lower_case:
            text = text.lower()
        text = text.split()
        split_tokens = []
        for token in text:
            if token:
                split_tokens.extend(list(self.bpe(token ).split(''' ''' ) ) )
        return split_tokens
    def _convert_token_to_id( self , token ) -> int:
        '''simple docstring'''
        return self.encoder.get(token , self.encoder.get(self.unk_token ) )
    def _convert_id_to_token( self , index ) -> str:
        '''simple docstring'''
        result = self.decoder.get(index , self.unk_token )
        return result
    def convert_tokens_to_string( self , tokens ) -> str:
        '''simple docstring'''
        string = ''' '''.join(tokens )
        # make sure @@ tokens are concatenated
        string = ''''''.join(string.split(BPE_TOKEN_MERGES ) )
        return string
    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        '''simple docstring'''
        if not os.path.isdir(save_directory ):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        merges_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
        with open(vocab_file , '''w''' , encoding='''utf-8''' ) as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=True , ensure_ascii=False ) + '''\n''' )
        index = 0
        if self.bpe_ranks is None:
            return (vocab_file,)
        with open(merges_file , '''w''' , encoding='''utf-8''' ) as writer:
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
                if index != token_index:
                    logger.warning(
                        F"""Saving vocabulary to {merges_file}: BPE merge indices are not consecutive."""
                        ''' Please check that the tokenizer is not corrupted!''' )
                    index = token_index
                writer.write(''' '''.join(bpe_tokens ) + '''\n''' )
                index += 1
        return (vocab_file, merges_file)
| 6 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
LEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'facebook/levit-128S': 'https://huggingface.co/facebook/levit-128S/resolve/main/config.json',
# See all LeViT models at https://huggingface.co/models?filter=levit
}
class LevitConfig( PretrainedConfig ):
    model_type = '''levit'''
    def __init__( self , image_size=224 , num_channels=3 , kernel_size=3 , stride=2 , padding=1 , patch_size=16 , hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , mlp_ratio=[2, 2, 2] , attention_ratio=[2, 2, 2] , initializer_range=0.02 , **kwargs , ) -> Optional[Any]:
'''simple docstring'''
        super().__init__(**kwargs )
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        self.down_ops = [
            ['''Subsample''', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ['''Subsample''', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]
class LevitOnnxConfig( OnnxConfig ):
    torch_onnx_minimum_version = version.parse('''1.11''' )
@property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
    def atol_for_validation( self ) -> float:
'''simple docstring'''
        return 1E-4
| 6 | 1 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
logger = logging.get_logger(__name__)
if is_vision_available():
import PIL
class _SCREAMING_SNAKE_CASE ( BaseImageProcessor ):
snake_case__ : Dict = ["""pixel_values"""]
    def __init__( self , do_resize : bool = True , size : Dict[str, int] = None , resample : PILImageResampling = PILImageResampling.BICUBIC , do_center_crop : bool = True , crop_size : Dict[str, int] = None , do_rescale : bool = True , rescale_factor : Union[int, float] = 1 / 255 , do_normalize : bool = True , image_mean : Optional[Union[float, List[float]]] = None , image_std : Optional[Union[float, List[float]]] = None , do_convert_rgb : bool = True , **kwargs , ):
        super().__init__(**kwargs )
        size = size if size is not None else {"""shortest_edge""": 224}
        size = get_size_dict(size , default_to_square=False )
        crop_size = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
        crop_size = get_size_dict(crop_size , default_to_square=True , param_name="""crop_size""" )
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb
    def resize( self , image : np.ndarray , size : Dict[str, int] , resample : PILImageResampling = PILImageResampling.BICUBIC , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs , ):
        size = get_size_dict(size , default_to_square=False )
        if "shortest_edge" not in size:
            raise ValueError(F"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" )
        output_size = get_resize_output_image_size(image , size=size["""shortest_edge"""] , default_to_square=False )
        return resize(image , size=output_size , resample=resample , data_format=data_format , **kwargs )
    def center_crop( self , image : np.ndarray , size : Dict[str, int] , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs , ):
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(F"""The `size` parameter must contain the keys (height, width). Got {size.keys()}""" )
        return center_crop(image , size=(size["""height"""], size["""width"""]) , data_format=data_format , **kwargs )
    def rescale( self , image : np.ndarray , scale : Union[int, float] , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs , ):
        return rescale(image , scale=scale , data_format=data_format , **kwargs )
    def normalize( self , image : np.ndarray , mean : Union[float, List[float]] , std : Union[float, List[float]] , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs , ):
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )
    def preprocess( self , images : ImageInput , do_resize : bool = None , size : Dict[str, int] = None , resample : PILImageResampling = None , do_center_crop : bool = None , crop_size : int = None , do_rescale : bool = None , rescale_factor : float = None , do_normalize : bool = None , image_mean : Optional[Union[float, List[float]]] = None , image_std : Optional[Union[float, List[float]]] = None , do_convert_rgb : bool = None , return_tensors : Optional[Union[str, TensorType]] = None , data_format : Optional[ChannelDimension] = ChannelDimension.FIRST , **kwargs , ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size , param_name="""size""" , default_to_square=False )
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size , param_name="""crop_size""" , default_to_square=True )
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
                """torch.Tensor, tf.Tensor or jax.ndarray.""" )
        if do_resize and size is None:
            raise ValueError("""Size must be specified if do_resize is True.""" )
        if do_center_crop and crop_size is None:
            raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
        if do_rescale and rescale_factor is None:
            raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image ) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_resize:
            images = [self.resize(image=image , size=size , resample=resample ) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image , size=crop_size ) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
        if do_normalize:
            images = [self.normalize(image=image , mean=image_mean , std=image_std ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {"""pixel_values""": images}
        return BatchFeature(data=data , tensor_type=return_tensors )
| 62 |
def SCREAMING_SNAKE_CASE_ ( index : int ) -> Any:
"""simple docstring"""
return [
{
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
},
{
0: [6],
1: [9],
2: [4, 5],
3: [4],
4: [2, 3],
5: [2],
6: [0, 7],
7: [6],
8: [],
9: [1],
},
{
0: [4],
1: [6],
2: [],
3: [5, 6, 7],
4: [0, 6],
5: [3, 8, 9],
6: [1, 3, 4, 7],
7: [3, 6, 8, 9],
8: [5, 7],
9: [5, 7],
},
{
0: [1, 3],
1: [0, 2, 4],
2: [1, 3, 4],
3: [0, 2, 4],
4: [1, 2, 3],
},
][index]
def SCREAMING_SNAKE_CASE_ ( graph : dict[int, list[int]] ) -> list[tuple[int, int]]:
    """simple docstring"""
    id_ = 0
    n = len(graph )  # No of vertices in graph
    low = [0] * n
    visited = [False] * n
    def dfs(at : int , parent : int , bridges : list[tuple[int, int]] , id_ : int ):
        visited[at] = True
        low[at] = id_
        id_ += 1
        for to in graph[at]:
            if to == parent:
                pass
            elif not visited[to]:
                dfs(to , at , bridges , id_ )
                low[at] = min(low[at] , low[to] )
                if id_ <= low[to]:
                    bridges.append((at, to) if at < to else (to, at) )
            else:
                # This edge is a back edge and cannot be a bridge
                low[at] = min(low[at] , low[to] )
    bridges: list[tuple[int, int]] = []
    for i in range(n ):
        if not visited[i]:
            dfs(i , -1 , bridges , id_ )
    return bridges
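# Quick check (hypothetical input): in the path graph {0: [1], 1: [0, 2], 2: [1]}
# removing either edge disconnects the graph, so both (0, 1) and (1, 2) are
# reported as bridges.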
if __name__ == "__main__":
import doctest
doctest.testmod()
| 62 | 1 |
def SCREAMING_SNAKE_CASE_ ( numa : int , numb : int ) -> bool:
    """simple docstring"""
    return numa ^ numb < 0
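# Two's-complement trick: XOR copies the sign bit, so the result is negative
# exactly when the operands' signs differ, e.g. 1 ^ -1 < 0 is True while
# 1 ^ 1 < 0 is False.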
if __name__ == "__main__":
import doctest
doctest.testmod()
| 32 |
from __future__ import annotations
solution = []
def is_safe ( board : list[list[int]] , row : int , column : int ) -> bool:
    """simple docstring"""
    for i in range(len(board ) ):
        if board[row][i] == 1:
            return False
    for i in range(len(board ) ):
        if board[i][column] == 1:
            return False
    for i, j in zip(range(row , -1 , -1 ) , range(column , -1 , -1 ) ):
        if board[i][j] == 1:
            return False
    for i, j in zip(range(row , -1 , -1 ) , range(column , len(board ) ) ):
        if board[i][j] == 1:
            return False
    return True
def solve ( board : list[list[int]] , row : int ) -> bool:
    """simple docstring"""
    if row >= len(board ):
        # store a snapshot rather than the live board, so later backtracking
        # does not zero out the saved solution
        solution.append([list(r ) for r in board] )
        printboard(board )
        print()
        return True
    for i in range(len(board ) ):
        if is_safe(board , row , i ):
            board[row][i] = 1
            solve(board , row + 1 )
            board[row][i] = 0
    return False
def printboard ( board : list[list[int]] ) -> None:
    """simple docstring"""
    for i in range(len(board ) ):
        for j in range(len(board ) ):
            if board[i][j] == 1:
                print('Q' , end=' ' )
            else:
                print('.' , end=' ' )
        print()
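# For n = 8 the backtracking search below prints all 92 distinct placements.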
# n=int(input("The no. of queens"))
n = 8
board = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print('The total no. of solutions are :', len(solution))
| 32 | 1 |
"""simple docstring"""
import random
import torch
from huggingface_hub import HfApi
from diffusers import UNet2DModel
api = HfApi()
results = {}
# fmt: off
UpperCAmelCase = torch.tensor([
-0.75_15, -1.68_83, 0.24_20, 0.03_00, 0.63_47, 1.34_33, -1.17_43, -3.74_67,
1.23_42, -2.24_85, 0.46_36, 0.80_76, -0.79_91, 0.39_69, 0.84_98, 0.91_89,
-1.88_87, -3.35_22, 0.76_39, 0.20_40, 0.62_71, -2.71_48, -1.63_16, 3.08_39,
0.31_86, 0.27_21, -0.97_59, -1.24_61, 2.62_57, 1.35_57
])
UpperCAmelCase = torch.tensor([
-2.36_39, -2.53_44, 0.00_54, -0.66_74, 1.59_90, 1.01_58, 0.31_24, -2.14_36,
1.87_95, -2.54_29, -0.15_66, -0.39_73, 1.24_90, 2.64_47, 1.22_83, -0.52_08,
-2.81_54, -3.51_19, 2.38_38, 1.20_33, 1.72_01, -2.12_56, -1.45_76, 2.79_48,
2.42_04, -0.97_52, -1.25_46, 0.80_27, 3.27_58, 3.13_65
])
UpperCAmelCase = torch.tensor([
-0.65_31, -0.68_91, -0.31_72, -0.53_75, -0.91_40, -0.53_67, -0.11_75, -0.78_69,
-0.38_08, -0.45_13, -0.20_98, -0.00_83, 0.31_83, 0.51_40, 0.22_47, -0.13_04,
-0.13_02, -0.28_02, -0.20_84, -0.20_25, -0.49_67, -0.48_73, -0.08_61, 0.69_25,
0.02_50, 0.12_90, -0.15_43, 0.63_16, 1.04_60, 1.49_43
])
UpperCAmelCase = torch.tensor([
0.09_11, 0.11_07, 0.01_82, 0.04_35, -0.08_05, -0.06_08, 0.03_81, 0.21_72,
-0.02_80, 0.13_27, -0.02_99, -0.02_55, -0.00_50, -0.11_70, -0.10_46, 0.03_09,
0.13_67, 0.17_28, -0.05_33, -0.07_48, -0.05_34, 0.16_24, 0.03_84, -0.18_05,
-0.07_07, 0.06_42, 0.02_20, -0.01_34, -0.13_33, -0.15_05
])
UpperCAmelCase = torch.tensor([
0.13_21, 0.13_37, 0.04_40, 0.06_22, -0.05_91, -0.03_70, 0.05_03, 0.21_33,
-0.01_77, 0.14_15, -0.01_16, -0.01_12, 0.00_44, -0.09_80, -0.07_89, 0.03_95,
0.15_02, 0.17_85, -0.04_88, -0.05_14, -0.04_04, 0.15_39, 0.04_54, -0.15_59,
-0.06_65, 0.06_59, 0.03_83, -0.00_05, -0.12_66, -0.13_86
])
UpperCAmelCase = torch.tensor([
0.11_54, 0.12_18, 0.03_07, 0.05_26, -0.07_11, -0.05_41, 0.03_66, 0.20_78,
-0.02_67, 0.13_17, -0.02_26, -0.01_93, -0.00_14, -0.10_55, -0.09_02, 0.03_30,
0.13_91, 0.17_09, -0.05_62, -0.06_93, -0.05_60, 0.14_82, 0.03_81, -0.16_83,
-0.06_81, 0.06_61, 0.03_31, -0.00_46, -0.12_68, -0.14_31
])
UpperCAmelCase = torch.tensor([
0.11_92, 0.12_40, 0.04_14, 0.06_06, -0.05_57, -0.04_12, 0.04_30, 0.20_42,
-0.02_00, 0.13_85, -0.01_15, -0.01_32, 0.00_17, -0.09_65, -0.08_02, 0.03_98,
0.14_33, 0.17_47, -0.04_58, -0.05_33, -0.04_07, 0.15_45, 0.04_19, -0.15_74,
-0.06_45, 0.06_26, 0.03_41, -0.00_10, -0.11_99, -0.13_90
])
UpperCAmelCase = torch.tensor([
0.10_75, 0.10_74, 0.02_05, 0.04_31, -0.07_74, -0.06_07, 0.02_98, 0.20_42,
-0.03_20, 0.12_67, -0.02_81, -0.02_50, -0.00_64, -0.10_91, -0.09_46, 0.02_90,
0.13_28, 0.16_50, -0.05_80, -0.07_38, -0.05_86, 0.14_40, 0.03_37, -0.17_46,
-0.07_12, 0.06_05, 0.02_50, -0.00_99, -0.13_16, -0.14_73
])
UpperCAmelCase = torch.tensor([
-1.45_72, -2.04_81, -0.04_14, -0.60_05, 1.41_36, 0.58_48, 0.40_28, -2.73_30,
1.22_12, -2.12_28, 0.21_55, 0.40_39, 0.76_62, 2.05_35, 0.74_77, -0.32_43,
-2.17_58, -2.76_48, 1.69_47, 0.70_26, 1.23_38, -1.60_78, -0.86_82, 2.28_10,
1.85_74, -0.57_18, -0.55_86, -0.01_86, 2.34_15, 2.12_51])
UpperCAmelCase = torch.tensor([
-1.36_90, -1.97_20, -0.40_90, -0.69_66, 1.46_60, 0.99_38, -0.13_85, -2.73_24,
0.77_36, -1.89_17, 0.29_23, 0.42_93, 0.16_93, 1.41_12, 1.18_87, -0.31_81,
-2.21_60, -2.63_81, 1.31_70, 0.81_63, 0.92_40, -1.65_44, -0.60_99, 2.52_59,
1.64_30, -0.90_90, -0.93_92, -0.01_26, 2.42_68, 2.32_66
])
UpperCAmelCase = torch.tensor([
-1.35_25, -1.96_28, -0.39_56, -0.68_60, 1.46_64, 1.00_14, -0.12_59, -2.72_12,
0.77_72, -1.88_11, 0.29_96, 0.43_88, 0.17_04, 1.40_29, 1.17_01, -0.30_27,
-2.20_53, -2.62_87, 1.33_50, 0.81_31, 0.92_74, -1.62_92, -0.60_98, 2.51_31,
1.65_05, -0.89_58, -0.92_98, -0.01_51, 2.42_57, 2.33_55
])
UpperCAmelCase = torch.tensor([
-2.05_85, -2.78_97, -0.28_50, -0.89_40, 1.90_52, 0.57_02, 0.63_45, -3.89_59,
1.59_32, -3.23_19, 0.19_74, 0.02_87, 1.75_66, 2.65_43, 0.83_87, -0.53_51,
-3.27_36, -4.33_75, 2.90_29, 1.63_90, 1.46_40, -2.17_01, -1.90_13, 2.93_41,
3.49_81, -0.62_55, -1.16_44, -0.15_91, 3.70_97, 3.20_66
])
UpperCAmelCase = torch.tensor([
-2.31_39, -2.55_94, -0.01_97, -0.67_85, 1.70_01, 1.16_06, 0.30_75, -2.17_40,
1.80_71, -2.56_30, -0.09_26, -0.38_11, 1.21_16, 2.62_46, 1.27_31, -0.53_98,
-2.81_53, -3.61_40, 2.38_93, 1.32_62, 1.62_58, -2.18_56, -1.32_67, 2.83_95,
2.37_79, -1.06_23, -1.24_68, 0.89_59, 3.33_67, 3.22_43
])
UpperCAmelCase = torch.tensor([
-2.06_28, -2.76_67, -0.20_89, -0.82_63, 2.05_39, 0.59_92, 0.64_95, -3.83_36,
1.60_25, -3.28_17, 0.17_21, -0.06_33, 1.75_16, 2.70_39, 0.81_00, -0.59_08,
-3.21_13, -4.43_43, 2.92_57, 1.36_32, 1.55_62, -2.14_89, -1.98_94, 3.05_60,
3.33_96, -0.73_28, -1.04_17, 0.03_83, 3.70_93, 3.23_43
])
UpperCAmelCase = torch.tensor([
-1.45_74, -2.05_69, -0.04_73, -0.61_17, 1.40_18, 0.57_69, 0.41_29, -2.73_44,
1.22_41, -2.13_97, 0.20_00, 0.39_37, 0.76_16, 2.04_53, 0.73_24, -0.33_91,
-2.17_46, -2.77_44, 1.69_63, 0.69_21, 1.21_87, -1.61_72, -0.88_77, 2.24_39,
1.84_71, -0.58_39, -0.56_05, -0.04_64, 2.32_50, 2.12_19
])
# fmt: on
UpperCAmelCase = api.list_models(filter="""diffusers""")
for mod in models:
if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
UpperCAmelCase = '/home/patrick/google_checkpoints/' + mod.modelId.split("""/""")[-1]
print(F'''Started running {mod.modelId}!!!''')
if mod.modelId.startswith("""CompVis"""):
UpperCAmelCase = UNetaDModel.from_pretrained(local_checkpoint, subfolder="""unet""")
else:
            model = UNet2DModel.from_pretrained(local_checkpoint)
torch.manual_seed(0)
random.seed(0)
        noise = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        time_step = torch.tensor([10] * noise.shape[0])
with torch.no_grad():
            logits = model(noise, time_step).sample
assert torch.allclose(
logits[0, 0, 0, :30], results["""_""".join("""_""".join(mod.modelId.split("""/""")).split("""-"""))], atol=1E-3
)
print(F'''{mod.modelId} has passed successfully!!!''')
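# The loop above is a regression check: each hub checkpoint processes the same
# fixed noise batch, and the first 30 output logits are compared against the
# precomputed slices looked up in `results` by a key derived from the model id
# (with "/" and "-" replaced by "_").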
| 368 | """simple docstring"""
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class UpperCAmelCase_ ( ProcessorMixin ):
    attributes = ['''image_processor''', '''tokenizer''']
    image_processor_class = '''BlipImageProcessor'''
    tokenizer_class = ('''BertTokenizer''', '''BertTokenizerFast''')
    def __init__( self , image_processor , tokenizer ) -> int:
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor , tokenizer )
        self.current_processor = self.image_processor
    def __call__( self , images : ImageInput = None , text : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , add_special_tokens : bool = True , padding : Union[bool, str, PaddingStrategy] = False , truncation : Union[bool, str, TruncationStrategy] = None , max_length : Optional[int] = None , stride : int = 0 , pad_to_multiple_of : Optional[int] = None , return_attention_mask : Optional[bool] = None , return_overflowing_tokens : bool = False , return_special_tokens_mask : bool = False , return_offsets_mapping : bool = False , return_token_type_ids : bool = False , return_length : bool = False , verbose : bool = True , return_tensors : Optional[Union[str, TensorType]] = None , **kwargs , ) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError('''You have to specify either images or text.''' )
        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_token_type_ids=return_token_type_ids , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
            return text_encoding
        # add pixel_values
        encoding_image_processor = self.image_processor(images , return_tensors=return_tensors )
        if text is not None:
            text_encoding = self.tokenizer(
                text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_token_type_ids=return_token_type_ids , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
        else:
            text_encoding = None
        if text_encoding is not None:
            encoding_image_processor.update(text_encoding )
        return encoding_image_processor
    def batch_decode( self , *args , **kwargs ) -> List[Any]:
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ) -> str:
        return self.tokenizer.decode(*args , **kwargs )
@property
    def model_input_names( self ) -> Dict:
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 54 | 0 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
logger = logging.get_logger(__name__)
if is_vision_available():
import PIL
class SCREAMING_SNAKE_CASE (BaseImageProcessor ):
    model_input_names = ['''pixel_values''']
    def __init__( self , do_resize = True , size = None , resample = PILImageResampling.BICUBIC , do_center_crop = True , crop_size = None , do_rescale = True , rescale_factor = 1 / 255 , do_normalize = True , image_mean = None , image_std = None , do_convert_rgb = True , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs)
        size = size if size is not None else {'shortest_edge': 224}
        size = get_size_dict(size , default_to_square=False)
        crop_size = crop_size if crop_size is not None else {'height': 224, 'width': 224}
        crop_size = get_size_dict(crop_size , default_to_square=True , param_name='crop_size')
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb
    def resize( self , image , size , resample = PILImageResampling.BICUBIC , data_format = None , **kwargs , ):
        '''simple docstring'''
        size = get_size_dict(size , default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(F'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}')
        output_size = get_resize_output_image_size(image , size=size['shortest_edge'] , default_to_square=False)
        return resize(image , size=output_size , resample=resample , data_format=data_format , **kwargs)
    def center_crop( self , image , size , data_format = None , **kwargs , ):
        '''simple docstring'''
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(F'The `size` parameter must contain the keys (height, width). Got {size.keys()}')
        return center_crop(image , size=(size['height'], size['width']) , data_format=data_format , **kwargs)
    def rescale( self , image , scale , data_format = None , **kwargs , ):
        '''simple docstring'''
        return rescale(image , scale=scale , data_format=data_format , **kwargs)
    def normalize( self , image , mean , std , data_format = None , **kwargs , ):
        '''simple docstring'''
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs)
    def preprocess( self , images , do_resize = None , size = None , resample = None , do_center_crop = None , crop_size = None , do_rescale = None , rescale_factor = None , do_normalize = None , image_mean = None , image_std = None , do_convert_rgb = None , return_tensors = None , data_format = ChannelDimension.FIRST , **kwargs , ):
        '''simple docstring'''
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size , param_name='size' , default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size , param_name='crop_size' , default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.')
        if do_resize and size is None:
            raise ValueError('Size must be specified if do_resize is True.')
        if do_center_crop and crop_size is None:
            raise ValueError('Crop size must be specified if do_center_crop is True.')
        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.')
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('Image mean and std must be specified if do_normalize is True.')
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image , size=size , resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image , size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image , mean=image_mean , std=image_std) for image in images]
        images = [to_channel_dimension_format(image , data_format) for image in images]
        data = {'pixel_values': images}
        return BatchFeature(data=data , tensor_type=return_tensors)
| 190 |
'''simple docstring'''
from __future__ import annotations
from numpy import array, cos, cross, float64, radians, sin
from numpy.typing import NDArray
def polar_force ( magnitude : float , angle : float , radian_mode : bool = False ) -> list[float]:
    if radian_mode:
        return [magnitude * cos(angle ), magnitude * sin(angle )]
    return [magnitude * cos(radians(angle ) ), magnitude * sin(radians(angle ) )]
def in_static_equilibrium ( forces : NDArray[float64] , location : NDArray[float64] , eps : float = 10**-1 ) -> bool:
    moments: NDArray[float64] = cross(location , forces )
    sum_moments: float = sum(moments )
    return abs(sum_moments ) < eps
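# in_static_equilibrium() sums the z-components of r x F -- cross(location,
# forces) gives the moment of each force about the origin -- and accepts the
# system when the net moment is numerically zero (|sum| < eps).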
if __name__ == "__main__":
# Test to check if it works
    forces = array(
[
polar_force(718.4, 1_80 - 30),
polar_force(879.54, 45),
polar_force(1_00, -90),
]
)
    location: NDArray[float64] = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem 1 in image_data/2D_problems.jpg
    forces = array(
[
polar_force(30 * 9.81, 15),
polar_force(2_15, 1_80 - 45),
polar_force(2_64, 90 - 30),
]
)
    location = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem in image_data/2D_problems_1.jpg
    forces = array([[0, -20_00], [0, -12_00], [0, 1_56_00], [0, -1_24_00]])
    location = array([[0, 0], [6, 0], [10, 0], [12, 0]])
assert in_static_equilibrium(forces, location)
import doctest
    doctest.testmod()
| 190 | 1 |
'''simple docstring'''
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_setup_file ( ):
    parser = argparse.ArgumentParser()
    parser.add_argument('''-f''' )
    args = parser.parse_args()
    return args.f
class __lowercase ( TestCasePlus ):
'''simple docstring'''
    def setup (self ) -> Any:
        '''simple docstring'''
        stream_handler = logging.StreamHandler(sys.stdout )
        logger.addHandler(stream_handler )
    def run_and_check (self ,args ) -> List[Any]:
        '''simple docstring'''
        n_gpu = get_gpu_count()
        if n_gpu > 1:
            pass
            # XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
            # script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
            # distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
            # cmd = [sys.executable] + distributed_args + args
            # execute_subprocess_async(cmd, env=self.get_env())
            # XXX: test the results - need to save them first into .json file
        else:
            args.insert(0 ,'''run_glue_deebert.py''' )
            with patch.object(sys ,'''argv''' ,args ):
                result = run_glue_deebert.main()
                for value in result.values():
                    self.assertGreaterEqual(value ,0.6_6_6 )
@slow
@require_torch_non_multi_gpu
    def test_glue_deebert_train (self ) -> List[Any]:
'''simple docstring'''
        train_args = '''
--model_type roberta
--model_name_or_path roberta-base
--task_name MRPC
--do_train
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--max_seq_length 128
--per_gpu_eval_batch_size=1
--per_gpu_train_batch_size=8
--learning_rate 2e-4
--num_train_epochs 3
--overwrite_output_dir
--seed 42
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--save_steps 0
--overwrite_cache
--eval_after_first_stage
'''.split()
        self.run_and_check(train_args )
        eval_args = '''
--model_type roberta
--model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--task_name MRPC
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--max_seq_length 128
--eval_each_highway
--eval_highway
--overwrite_cache
--per_gpu_eval_batch_size=1
'''.split()
        self.run_and_check(eval_args )
        entropy_eval_args = '''
--model_type roberta
--model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--task_name MRPC
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--max_seq_length 128
--early_exit_entropy 0.1
--eval_highway
--overwrite_cache
--per_gpu_eval_batch_size=1
'''.split()
        self.run_and_check(entropy_eval_args )
| 359 |
'''simple docstring'''
import heapq
def greedy_min_vertex_cover ( graph : dict ) -> set:
    queue: list[list] = []
    # for each node and his adjacency list add them and the rank of the node to queue
    # using heapq module the queue will be filled like a Priority Queue
    # heapq works with a min priority queue, so I used -1*len(v) to build it
    for key, value in graph.items():
        # O(log(n))
        heapq.heappush(queue , [-1 * len(value ), (key, value)] )
    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()
    # while queue isn't empty and there are still edges
    # (queue[0][0] is the rank of the node with max rank)
    while queue and queue[0][0] != 0:
        # extract vertex with max rank from queue and add it to chosen_vertices
        argmax = heapq.heappop(queue )[1][0]
        chosen_vertices.add(argmax )
        # Remove all arcs adjacent to argmax
        for elem in queue:
            # if v haven't adjacent node, skip
            if elem[0] == 0:
                continue
            # if argmax is reachable from elem
            # remove argmax from elem's adjacent list and update his rank
            if argmax in elem[1][1]:
                index = elem[1][1].index(argmax )
                del elem[1][1][index]
                elem[0] += 1
        # re-order the queue
        heapq.heapify(queue )
    return chosen_vertices
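# Greedy max-degree heuristic: always cover the vertex with the most uncovered
# edges. It is fast, but only an approximation -- the returned cover is not
# guaranteed to be minimum.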
if __name__ == "__main__":
import doctest
doctest.testmod()
    graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(f'''Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}''')
| 217 | 0 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
VIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"google/vit-base-patch16-224": "https://huggingface.co/vit-base-patch16-224/resolve/main/config.json",
# See all ViT models at https://huggingface.co/models?filter=vit
}
class ViTConfig ( PretrainedConfig ):
"""simple docstring"""
    model_type = 'vit'
    def __init__( self ,hidden_size=768 ,num_hidden_layers=12 ,num_attention_heads=12 ,intermediate_size=3072 ,hidden_act="gelu" ,hidden_dropout_prob=0.0 ,attention_probs_dropout_prob=0.0 ,initializer_range=0.02 ,layer_norm_eps=1e-12 ,image_size=224 ,patch_size=16 ,num_channels=3 ,qkv_bias=True ,encoder_stride=16 ,**kwargs ,):
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride
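        # with these defaults a ViT-style encoder sees (224 // 16) ** 2 = 196
        # patches plus the prepended [CLS] token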
class ViTOnnxConfig ( OnnxConfig ):
"""simple docstring"""
    torch_onnx_minimum_version = version.parse('1.11' )
@property
    def inputs( self ):
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
    def atol_for_validation( self ):
return 1e-4
| 334 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"transfo-xl-wt103": "https://huggingface.co/transfo-xl-wt103/resolve/main/config.json",
}
class a_ ( PretrainedConfig ):
"""simple docstring"""
    model_type = 'transfo-xl'
    keys_to_ignore_at_inference = ['mems']
    attribute_map = {
'n_token': 'vocab_size',
'hidden_size': 'd_model',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
    def __init__( self ,vocab_size=267735 ,cutoffs=[20000, 40000, 200000] ,d_model=1024 ,d_embed=1024 ,n_head=16 ,d_head=64 ,d_inner=4096 ,div_val=4 ,pre_lnorm=False ,n_layer=18 ,mem_len=1600 ,clamp_len=1000 ,same_length=True ,proj_share_all_but_first=True ,attn_type=0 ,sample_softmax=-1 ,adaptive=True ,dropout=0.1 ,dropatt=0.0 ,untie_r=True ,init="normal" ,init_range=0.01 ,proj_init_std=0.01 ,init_std=0.02 ,layer_norm_epsilon=1e-5 ,eos_token_id=0 ,**kwargs ,):
        self.vocab_size = vocab_size
        self.cutoffs = []
        self.cutoffs.extend(cutoffs )
        if proj_share_all_but_first:
            self.tie_projs = [False] + [True] * len(self.cutoffs )
        else:
            self.tie_projs = [False] + [False] * len(self.cutoffs )
        self.d_model = d_model
        self.d_embed = d_embed
        self.d_head = d_head
        self.d_inner = d_inner
        self.div_val = div_val
        self.pre_lnorm = pre_lnorm
        self.n_layer = n_layer
        self.n_head = n_head
        self.mem_len = mem_len
        self.same_length = same_length
        self.attn_type = attn_type
        self.clamp_len = clamp_len
        self.sample_softmax = sample_softmax
        self.adaptive = adaptive
        self.dropout = dropout
        self.dropatt = dropatt
        self.untie_r = untie_r
        self.init = init
        self.init_range = init_range
        self.proj_init_std = proj_init_std
        self.init_std = init_std
        self.layer_norm_epsilon = layer_norm_epsilon
        super().__init__(eos_token_id=eos_token_id ,**kwargs )
    @property
    def max_position_embeddings(self):
        # Message copied from Transformer-XL documentation
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        # Message copied from Transformer-XL documentation
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit."
        )
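
# Bookkeeping sketch for the adaptive softmax above: the default cutoffs
# [20000, 40000, 200000] split the vocabulary into four clusters, and
# proj_share_all_but_first=True produces tie_projs == [False, True, True, True]
# (the head cluster keeps its own projection; the tail clusters share one).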
| 334 | 1 |
"""simple docstring"""
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__version__ = "2.13.1"
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse('3.7'):
raise ImportWarning(
'To use `datasets`, Python>=3.7 is required, and the current version of Python doesn\'t match this condition.'
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
'To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn\'t match this condition.\n'
'If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`.'
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
| 203 |
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class DebertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = DebertaTokenizer
    test_rust_tokenizer = True
    rust_tokenizer_class = DebertaTokenizerFast
    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "[UNK]",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "[UNK]"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_token_type_ids(self):
        tokenizer = self.get_tokenizer()
        tokd = tokenizer("Hello", "World")
        expected_token_type_ids = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
        self.assertListEqual(tokd["token_type_ids"], expected_token_type_ids)
    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/deberta-base")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_text_from_decode = tokenizer.encode(
            "sequence builders", add_special_tokens=True, add_prefix_space=False)
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode
    @slow
    def test_tokenizer_integration(self):
        tokenizer_classes = [self.tokenizer_class]
        if self.test_rust_tokenizer:
            tokenizer_classes.append(self.rust_tokenizer_class)

        for tokenizer_class in tokenizer_classes:
            tokenizer = tokenizer_class.from_pretrained("microsoft/deberta-base")

            sequences = [
                "ALBERT: A Lite BERT for Self-supervised Learning of Language Representations",
                "ALBERT incorporates two parameter reduction techniques",
                "The first one is a factorized embedding parameterization. By decomposing the large vocabulary"
                " embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"
                " vocabulary embedding.",
            ]

            encoding = tokenizer(sequences, padding=True)
            decoded_sequences = [tokenizer.decode(seq, skip_special_tokens=True) for seq in encoding["input_ids"]]
# fmt: off
            expected_encoding = {
'input_ids': [
[1, 21_18, 1_11_26, 5_65, 35, 83, 2_51_91, 1_63, 1_88_54, 13, 1_21_56, 12, 1_61_01, 2_53_76, 1_38_07, 9, 2_22_05, 2_78_93, 16_35, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 21_18, 1_11_26, 5_65, 2_45_36, 80, 4_37_97, 48_78, 73_73, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1_33, 78, 65, 16, 10, 37_24, 15_38, 3_31_83, 1_13_03, 4_37_97, 19_38, 4, 8_70, 2_41_65, 2_91_05, 5, 7_39, 3_26_44, 3_31_83, 1_13_03, 3_61_73, 88, 80, 6_50, 78_21, 4_59_40, 6, 52, 25_59, 5, 18_36, 9, 5, 73_97, 1_31_71, 31, 5, 18_36, 9, 3_26_44, 3_31_83, 1_13_03, 4, 2]
],
'token_type_ids': [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
'attention_mask': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
# fmt: on
            expected_decoded_sequences = [
'ALBERT: A Lite BERT for Self-supervised Learning of Language Representations',
'ALBERT incorporates two parameter reduction techniques',
'The first one is a factorized embedding parameterization. By decomposing the large vocabulary'
' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'
' vocabulary embedding.',
]
            self.assertDictEqual(encoding.data, expected_encoding)

            for expected, decoded in zip(expected_decoded_sequences, decoded_sequences):
                self.assertEqual(expected, decoded)
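
# A quick, self-contained illustration of the byte-level BPE convention used
# above: "\u0120" (the Ġ symbol) marks a token that starts a new word, so
# joining tokens and mapping it back to a space recovers the raw text.
assert "".join(["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]).replace("\u0120", " ") == "lower newer"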
| 203 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)


class MaskFormerSwinConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "maskformer-swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }
    def __init__(self, image_size=224, patch_size=4, num_channels=3, embed_dim=96, depths=[2, 2, 6, 2],
                 num_heads=[3, 6, 12, 24], window_size=7, mlp_ratio=4.0, qkv_bias=True, hidden_dropout_prob=0.0,
                 attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu",
                 use_absolute_embeddings=False, initializer_range=0.02, layer_norm_eps=1e-5, out_features=None,
                 out_indices=None, **kwargs):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
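
# Channel progression implied by the defaults above: the embedding width
# doubles after each stage, so embed_dim=96 with depths=[2, 2, 6, 2] gives
# per-stage widths [96, 192, 384, 768] and hidden_size = 96 * 2 ** 3 = 768.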
| 268 |
"""simple docstring"""
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
lowerCamelCase_ = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageClassificationPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
        )
    def _sanitize_parameters(self, top_k=None):
        postprocess_params = {}
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return {}, {}, postprocess_params

    def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs):
        return super().__call__(images, **kwargs)

    def preprocess(self, image):
        image = load_image(image)
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs
    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1)[0]
            scores, ids = probs.topk(top_k)
        elif self.framework == "tf":
            probs = stable_softmax(model_outputs.logits, axis=-1)[0]
            topk = tf.math.top_k(probs, k=top_k)
            scores, ids = topk.values.numpy(), topk.indices.numpy()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
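
# Minimal usage sketch (the checkpoint name here is an assumption; any image
# classification model on the Hub works):
#
#   from transformers import pipeline
#
#   classifier = pipeline("image-classification", model="google/vit-base-patch16-224")
#   print(classifier("cat.png", top_k=3))  # [{"score": ..., "label": ...}, ...]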
| 268 | 1 |
"""simple docstring"""
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def raise_fake_out_of_memory():
    raise RuntimeError("CUDA out of memory.")
class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))
class MemoryTest(unittest.TestCase):
    def test_memory_implicit(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()

        mock_training_loop_function()
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])
    def test_memory_explicit(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()
            return batch_size, arg1

        bs, arg1 = mock_training_loop_function("hello")
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])
        self.assertListEqual([bs, arg1], [8, "hello"])
    def test_start_zero(self):
        @find_executable_batch_size(starting_batch_size=0)
        def mock_training_loop_function(batch_size):
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])

    def test_approach_zero(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            if batch_size > 0:
                raise_fake_out_of_memory()
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])
    def test_verbose_guard(self):
        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1, arg2):
            if batch_size != 8:
                raise_fake_out_of_memory()

        with self.assertRaises(TypeError) as cm:
            mock_training_loop_function(128, "hello", "world")
        self.assertIn("Batch size was passed into `f`", cm.exception.args[0])
        self.assertIn("`f(arg1='hello', arg2='world')", cm.exception.args[0])

    def test_any_other_error(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            raise ValueError("Oops, we had an error!")

        with self.assertRaises(ValueError) as cm:
            mock_training_loop_function()
        self.assertIn("Oops, we had an error!", cm.exception.args[0])
    @require_cuda
    def test_release_memory(self):
        starting_memory = torch.cuda.memory_allocated()
        model = ModelForTest()
        model.cuda()
        self.assertGreater(torch.cuda.memory_allocated(), starting_memory)
        model = release_memory(model)
        self.assertEqual(torch.cuda.memory_allocated(), starting_memory)
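
# Typical real-world shape of the decorator exercised above -- a sketch, with
# the training body elided:
#
#   @find_executable_batch_size(starting_batch_size=128)
#   def train(batch_size):
#       ...  # build dataloaders with `batch_size` and run the loop
#
#   train()  # on a CUDA OOM the decorator retries with batch_size // 2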
| 24 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_luke": ["LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP", "LukeConfig"],
    "tokenization_luke": ["LukeTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_luke"] = [
        "LUKE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LukeForEntityClassification",
        "LukeForEntityPairClassification",
        "LukeForEntitySpanClassification",
        "LukeForMultipleChoice",
        "LukeForQuestionAnswering",
        "LukeForSequenceClassification",
        "LukeForTokenClassification",
        "LukeForMaskedLM",
        "LukeModel",
        "LukePreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig
from .tokenization_luke import LukeTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_luke import (
LUKE_PRETRAINED_MODEL_ARCHIVE_LIST,
LukeForEntityClassification,
LukeForEntityPairClassification,
LukeForEntitySpanClassification,
LukeForMaskedLM,
LukeForMultipleChoice,
LukeForQuestionAnswering,
LukeForSequenceClassification,
LukeForTokenClassification,
LukeModel,
LukePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 24 | 1 |
import torch
from diffusers import DDPMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class DDPMParallelSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDPMParallelScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }

        config.update(**kwargs)
        return config
    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_variance_type(self):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)
    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5
    def test_batch_step_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1

        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3], dim=0)
        timesteps = torch.arange(num_trained_timesteps)[0:3, None].repeat(1, per_sample_batch)

        residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
        pred_prev_sample = scheduler.batch_step_no_noise(residual, timesteps.flatten(0, 1), samples.flatten(0, 1))

        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))

        assert abs(result_sum.item() - 1153.1833) < 1e-2
        assert abs(result_mean.item() - 0.5005) < 1e-3
    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.9606) < 1e-2
        assert abs(result_mean.item() - 0.3372) < 1e-3
    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 202.0296) < 1e-2
        assert abs(result_mean.item() - 0.2631) < 1e-3
    def test_custom_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        scheduler.set_timesteps(timesteps=timesteps)

        scheduler_timesteps = scheduler.timesteps

        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]

            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()

            self.assertEqual(prev_t, expected_prev_t)
    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 51, 0]

        with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            ValueError,
            msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
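

# The `fixed_small` values asserted in `test_variance` come from the DDPM
# posterior variance beta_tilde_t = (1 - alpha_bar_{t-1}) / (1 - alpha_bar_t) * beta_t.
# A standalone sketch for the linear schedule used above (not part of the suite):
def _fixed_small_variance(t, num_steps=1000, beta_start=0.0001, beta_end=0.02):
    betas = torch.linspace(beta_start, beta_end, num_steps)
    alphas_cumprod = torch.cumprod(1.0 - betas, dim=0)
    return (1 - alphas_cumprod[t - 1]) / (1 - alphas_cumprod[t]) * betas[t]  # ~0.00979 at t=487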
| 310 |
import math
import unittest
def is_prime(number: int) -> bool:
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, and all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
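

# Quick sanity check of the 6k +/- 1 shortcut: 25 = 5 * 5 is caught at i = 5,
# 49 = 7 * 7 at i + 2 = 7, while 97 survives the loop (illustrative only).
assert not is_prime(25) and not is_prime(49) and is_prime(97)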
class Test(unittest.TestCase):
    def test_primes(self):
        self.assertTrue(is_prime(2))
        self.assertTrue(is_prime(3))
        self.assertTrue(is_prime(5))
        self.assertTrue(is_prime(7))
        self.assertTrue(is_prime(11))
        self.assertTrue(is_prime(13))
        self.assertTrue(is_prime(17))
        self.assertTrue(is_prime(19))
        self.assertTrue(is_prime(23))
        self.assertTrue(is_prime(29))

    def test_not_primes(self):
        with self.assertRaises(AssertionError):
            is_prime(-19)
        self.assertFalse(
            is_prime(0),
            "Zero doesn't have any positive factors, primes must have exactly two.",
        )
        self.assertFalse(
            is_prime(1),
            "One only has 1 positive factor, primes must have exactly two.",
        )
        self.assertFalse(is_prime(2 * 2))
        self.assertFalse(is_prime(2 * 3))
        self.assertFalse(is_prime(3 * 3))
        self.assertFalse(is_prime(3 * 5))
        self.assertFalse(is_prime(3 * 5 * 7))
if __name__ == "__main__":
unittest.main()
| 162 | 0 |
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def _get(k):
    return getitem, k


def _set(k, v):
    return setitem, k, v


def _del(k):
    return delitem, k


def _run_operation(obj, fun, *args):
    try:
        return fun(obj, *args), None
    except Exception as e:
        return None, e
_add_items = (
    _set("key_a", "val_a"),
    _set("key_b", "val_b"),
)

_overwrite_items = [
    _set("key_a", "val_a"),
    _set("key_a", "val_b"),
]

_delete_items = [
    _set("key_a", "val_a"),
    _set("key_b", "val_b"),
    _del("key_a"),
    _del("key_b"),
    _set("key_a", "val_a"),
    _del("key_a"),
]

_access_absent_items = [
    _get("key_a"),
    _del("key_a"),
    _set("key_a", "val_a"),
    _del("key_a"),
    _del("key_a"),
    _get("key_a"),
]

_add_with_resize_up = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
]

_add_with_resize_down = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
    *[_del(x) for x in range(5)],
    _set("key_a", "val_b"),
]
@pytest.mark.parametrize(
'''operations''', (
pytest.param(_add_items, id='''add items'''),
pytest.param(_overwrite_items, id='''overwrite items'''),
pytest.param(_delete_items, id='''delete items'''),
pytest.param(_access_absent_items, id='''access absent items'''),
pytest.param(_add_with_resize_up, id='''add with resize up'''),
pytest.param(_add_with_resize_down, id='''add with resize down'''),
), )
def test_hash_map_is_the_same_as_dict(operations):
    my = HashMap(initial_block_size=4)
    py = {}
    for _, (fun, *args) in enumerate(operations):
        my_res, my_exc = _run_operation(my, fun, *args)
        py_res, py_exc = _run_operation(py, fun, *args)
        assert my_res == py_res
        assert str(my) == str(py)
        assert set(my) == set(py)
        assert len(my) == len(py)
        assert set(my.items()) == set(py.items())
def test_no_new_methods_was_added_to_api():
    def is_public(name: str) -> bool:
        return not name.startswith("_")

    dict_public_names = {name for name in dir({}) if is_public(name)}
    hash_public_names = {name for name in dir(HashMap()) if is_public(name)}

    assert dict_public_names > hash_public_names
| 353 |
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class AffineTransformed(TransformedDistribution):
    def __init__(self, base_distribution: Distribution, loc=None, scale=None, event_dim=0):
        self.scale = 1.0 if scale is None else scale
        self.loc = 0.0 if loc is None else loc

        super().__init__(base_distribution, [AffineTransform(loc=self.loc, scale=self.scale, event_dim=event_dim)])

    @property
    def mean(self):
        return self.base_dist.mean * self.scale + self.loc

    @property
    def variance(self):
        return self.base_dist.variance * self.scale**2

    @property
    def stddev(self):
        return self.variance.sqrt()
class ParameterProjection(nn.Module):
    def __init__(self, in_features: int, args_dim: Dict[str, int], domain_map: Callable[..., Tuple[torch.Tensor]], **kwargs):
        super().__init__(**kwargs)
        self.args_dim = args_dim
        self.proj = nn.ModuleList([nn.Linear(in_features, dim) for dim in args_dim.values()])
        self.domain_map = domain_map

    def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor]:
        params_unbounded = [proj(x) for proj in self.proj]
        return self.domain_map(*params_unbounded)


class LambdaLayer(nn.Module):
    def __init__(self, function):
        super().__init__()
        self.function = function

    def forward(self, x, *args):
        return self.function(x, *args)
class DistributionOutput:
    distribution_class: type
    in_features: int
    args_dim: Dict[str, int]

    def __init__(self, dim: int = 1) -> None:
        self.dim = dim
        self.args_dim = {k: dim * self.args_dim[k] for k in self.args_dim}

    def _base_distribution(self, distr_args):
        if self.dim == 1:
            return self.distribution_class(*distr_args)
        else:
            return Independent(self.distribution_class(*distr_args), 1)

    def distribution(self, distr_args, loc=None, scale=None) -> Distribution:
        distr = self._base_distribution(distr_args)
        if loc is None and scale is None:
            return distr
        else:
            return AffineTransformed(distr, loc=loc, scale=scale, event_dim=self.event_dim)

    @property
    def event_shape(self) -> Tuple:
        return () if self.dim == 1 else (self.dim,)

    @property
    def event_dim(self) -> int:
        return len(self.event_shape)

    @property
    def value_in_support(self) -> float:
        return 0.0

    def get_parameter_projection(self, in_features: int) -> nn.Module:
        return ParameterProjection(
            in_features=in_features, args_dim=self.args_dim, domain_map=LambdaLayer(self.domain_map)
        )

    def domain_map(self, *args: torch.Tensor):
        raise NotImplementedError()

    @staticmethod
    def squareplus(x: torch.Tensor) -> torch.Tensor:
        return (x + torch.sqrt(torch.square(x) + 4.0)) / 2.0
class StudentTOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"df": 1, "loc": 1, "scale": 1}
    distribution_class: type = StudentT

    @classmethod
    def domain_map(cls, df: torch.Tensor, loc: torch.Tensor, scale: torch.Tensor):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        df = 2.0 + cls.squareplus(df)
        return df.squeeze(-1), loc.squeeze(-1), scale.squeeze(-1)


class NormalOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"loc": 1, "scale": 1}
    distribution_class: type = Normal

    @classmethod
    def domain_map(cls, loc: torch.Tensor, scale: torch.Tensor):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        return loc.squeeze(-1), scale.squeeze(-1)


class NegativeBinomialOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"total_count": 1, "logits": 1}
    distribution_class: type = NegativeBinomial

    @classmethod
    def domain_map(cls, total_count: torch.Tensor, logits: torch.Tensor):
        total_count = cls.squareplus(total_count)
        return total_count.squeeze(-1), logits.squeeze(-1)

    def _base_distribution(self, distr_args) -> Distribution:
        total_count, logits = distr_args
        if self.dim == 1:
            return self.distribution_class(total_count=total_count, logits=logits)
        else:
            return Independent(self.distribution_class(total_count=total_count, logits=logits), 1)

    def distribution(self, distr_args, loc=None, scale=None) -> Distribution:
        total_count, logits = distr_args

        if scale is not None:
            # See scaling property of Gamma.
            logits += scale.log()

        return self._base_distribution((total_count, logits))
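
# `squareplus` above maps any real input to a strictly positive output, much
# like a smooth softplus: f(x) = (x + sqrt(x**2 + 4)) / 2, so f(-3) ~= 0.30,
# f(0) = 1.0 and f(3) ~= 3.30. That is why the output heads can feed it raw,
# unconstrained activations for `scale`, `df` and `total_count`.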
| 9 | 0 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "TsinghuaAI/CPM-Generate": "https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model",
    }
}
class CpmTokenizer(PreTrainedTokenizer):
    def __init__(self, vocab_file, do_lower_case=False, remove_space=True, keep_accents=False, bos_token="<s>",
                 eos_token="</s>", unk_token="<unk>", sep_token="<sep>", pad_token="<pad>", cls_token="<cls>",
                 mask_token="<mask>", additional_special_tokens=["<eop>", "<eod>"], sp_model_kwargs=None,
                 **kwargs) -> None:
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents,
                         bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token,
                         pad_token=pad_token, cls_token=cls_token, mask_token=mask_token,
                         additional_special_tokens=additional_special_tokens,
                         sp_model_kwargs=self.sp_model_kwargs, **kwargs)

        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

        try:
            import jieba
        except ModuleNotFoundError as error:
            raise error.__class__(
                "You need to install jieba to use CpmTokenizer or CpmTokenizerFast. "
                "See https://pypi.org/project/jieba/ for installation."
            )
        self.jieba = jieba
        self.translator = str.maketrans(" \n", "\u2582\u2583")
    @property
    # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')

        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()

        return outputs
    def _tokenize(self, text: str) -> List[str]:
        """Tokenize a string."""
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)

        return new_pieces

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings) to a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is not None:
            return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1, 1]
        return ([0] * len(token_ids_0)) + [1, 1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls_segment_id = [2]

        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def _decode(self, *args, **kwargs):
        text = super()._decode(*args, **kwargs)
        text = text.replace(" ", "").replace("\u2582", " ").replace("\u2583", "\n")
        return text
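
# The `translator` built in `__init__` round-trips whitespace through two
# placeholder glyphs so SentencePiece does not collapse it, and `_decode`
# above reverses the mapping. Illustrative check:
#   "a b\nc".translate(str.maketrans(" \n", "\u2582\u2583")) == "a\u2582b\u2583c"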
| 198 |
import doctest
from collections import deque
import numpy as np
class CircularConvolution:
    def __init__(self) -> None:
        self.first_signal = [2, 1, 2, -1]
        self.second_signal = [1, 2, 3, 4]

    def circular_convolution(self) -> list[float]:
        length_first_signal = len(self.first_signal)
        length_second_signal = len(self.second_signal)

        max_length = max(length_first_signal, length_second_signal)

        # create a zero matrix of max_length x max_length
        matrix = [[0] * max_length for i in range(max_length)]

        # fills the smaller signal with zeros to make both signals of same length
        if length_first_signal < length_second_signal:
            self.first_signal += [0] * (max_length - length_first_signal)
        elif length_first_signal > length_second_signal:
            self.second_signal += [0] * (max_length - length_second_signal)

        for i in range(max_length):
            rotated_signal = deque(self.second_signal)
            rotated_signal.rotate(i)
            for j, item in enumerate(rotated_signal):
                matrix[i][j] += item

        # multiply the matrix with the first signal
        final_signal = np.matmul(np.transpose(matrix), np.transpose(self.first_signal))

        # rounding-off to two decimal places
        return [round(i, 2) for i in final_signal]
if __name__ == "__main__":
doctest.testmod()
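    # Cross-check via the DFT identity: circular convolution in time equals
    # pointwise multiplication in frequency (an illustrative sketch).
    a, b = [2, 1, 2, -1], [1, 2, 3, 4]
    print(np.real(np.fft.ifft(np.fft.fft(a) * np.fft.fft(b))).round(2))  # [10. 10.  6. 14.]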
| 198 | 1 |
from .glue import GlueDataset, GlueDataTrainingArguments
from .language_modeling import (
LineByLineTextDataset,
LineByLineWithRefDataset,
LineByLineWithSOPTextDataset,
TextDataset,
TextDatasetForNextSentencePrediction,
)
from .squad import SquadDataset, SquadDataTrainingArguments
| 270 |
import logging
from transformers.configuration_utils import PretrainedConfig
logger = logging.getLogger(__name__)


class MaskedBertConfig(PretrainedConfig):
    model_type = "masked_bert"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, pruning_method="topK", mask_init="constant", mask_scale=0.0, **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.pruning_method = pruning_method
        self.mask_init = mask_init
        self.mask_scale = mask_scale
| 270 | 1 |
"""simple docstring"""
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class FlaxControlNetPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def test_canny(self):
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-canny", from_pt=True, dtype=jnp.bfloat16
        )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16
        )
        params["controlnet"] = controlnet_params

        prompts = "bird"
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)

        canny_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png"
        )
        processed_image = pipe.prepare_image_inputs([canny_image] * num_samples)

        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())

        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)

        images = pipe(
            prompt_ids=prompt_ids,
            image=processed_image,
            params=p_params,
            prng_seed=rng,
            num_inference_steps=50,
            jit=True,
        ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [0.167969, 0.116699, 0.081543, 0.154297, 0.132812, 0.108887, 0.169922, 0.169922, 0.205078]
        )
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
    def test_pose(self):
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-openpose", from_pt=True, dtype=jnp.bfloat16
        )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16
        )
        params["controlnet"] = controlnet_params

        prompts = "Chef in the kitchen"
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)

        pose_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png"
        )
        processed_image = pipe.prepare_image_inputs([pose_image] * num_samples)

        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())

        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)

        images = pipe(
            prompt_ids=prompt_ids,
            image=processed_image,
            params=p_params,
            prng_seed=rng,
            num_inference_steps=50,
            jit=True,
        ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [[0.271484, 0.261719, 0.275391, 0.277344, 0.279297, 0.291016, 0.294922, 0.302734, 0.302734]]
        )
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
| 109 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "kssteven/ibert-roberta-base": "https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json",
    "kssteven/ibert-roberta-large": "https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json",
    "kssteven/ibert-roberta-large-mnli": (
        "https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json"
    ),
}


class IBertConfig(PretrainedConfig):
    model_type = "ibert"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", quant_mode=False, force_dequant="none", **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.quant_mode = quant_mode
        self.force_dequant = force_dequant
class IBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 313 | 0 |
"""simple docstring"""
import argparse
import torch
from safetensors.torch import load_file
from diffusers import StableDiffusionPipeline
def convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha):
    # load base model
    pipeline = StableDiffusionPipeline.from_pretrained(base_model_path, torch_dtype=torch.float32)

    # load LoRA weight from .safetensors
    state_dict = load_file(checkpoint_path)

    visited = []

    # directly update weight in diffusers model
    for key in state_dict:
        # it is suggested to print out the key, it usually will be something like below
        # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"

        # as we have set the alpha beforehand, so just skip
        if ".alpha" in key or key in visited:
            continue

        if "text" in key:
            layer_infos = key.split(".")[0].split(lora_prefix_text_encoder + "_")[-1].split("_")
            curr_layer = pipeline.text_encoder
        else:
            layer_infos = key.split(".")[0].split(lora_prefix_unet + "_")[-1].split("_")
            curr_layer = pipeline.unet

        # find the target layer
        temp_name = layer_infos.pop(0)
        while len(layer_infos) > -1:
            try:
                curr_layer = curr_layer.__getattr__(temp_name)
                if len(layer_infos) > 0:
                    temp_name = layer_infos.pop(0)
                elif len(layer_infos) == 0:
                    break
            except Exception:
                if len(temp_name) > 0:
                    temp_name += "_" + layer_infos.pop(0)
                else:
                    temp_name = layer_infos.pop(0)

        pair_keys = []
        if "lora_down" in key:
            pair_keys.append(key.replace("lora_down", "lora_up"))
            pair_keys.append(key)
        else:
            pair_keys.append(key)
            pair_keys.append(key.replace("lora_up", "lora_down"))

        # update weight
        if len(state_dict[pair_keys[0]].shape) == 4:
            weight_up = state_dict[pair_keys[0]].squeeze(3).squeeze(2).to(torch.float32)
            weight_down = state_dict[pair_keys[1]].squeeze(3).squeeze(2).to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down).unsqueeze(2).unsqueeze(3)
        else:
            weight_up = state_dict[pair_keys[0]].to(torch.float32)
            weight_down = state_dict[pair_keys[1]].to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down)

        # update visited list
        for item in pair_keys:
            visited.append(item)

    return pipeline
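

# The core of the merge above is the rank-r update W <- W + alpha * (up @ down).
# Stripped-down illustration with stand-in tensors (not part of the script):
#
#   weight_up, weight_down = torch.randn(320, 4), torch.randn(4, 320)  # rank-4 pair
#   layer.weight.data += 0.75 * torch.mm(weight_up, weight_down)       # alpha = 0.75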
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--base_model_path""", default=None, type=str, required=True, help="""Path to the base model in diffusers format."""
)
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert."""
)
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument(
"""--lora_prefix_unet""", default="""lora_unet""", type=str, help="""The prefix of UNet weight in safetensors"""
)
parser.add_argument(
"""--lora_prefix_text_encoder""",
default="""lora_te""",
type=str,
help="""The prefix of text encoder weight in safetensors""",
)
parser.add_argument("""--alpha""", default=0.75, type=float, help="""The merging ratio in W = W0 + alpha * deltaW""")
parser.add_argument(
"""--to_safetensors""", action="""store_true""", help="""Whether to store pipeline in safetensors format or not."""
)
parser.add_argument("""--device""", type=str, help="""Device to use (e.g. cpu, cuda:0, cuda:1, etc.)""")
    args = parser.parse_args()

    base_model_path = args.base_model_path
    checkpoint_path = args.checkpoint_path
    dump_path = args.dump_path
    lora_prefix_unet = args.lora_prefix_unet
    lora_prefix_text_encoder = args.lora_prefix_text_encoder
    alpha = args.alpha

    pipe = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)
    pipe = pipe.to(args.device)
    pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 181 |
"""simple docstring"""
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def set_seed(seed: int):
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # ^^ safe to call this function even if cuda is not available
class EMAModel:
    """
    Exponential Moving Average of models weights
    """

    def __init__(self, parameters: Iterable[torch.nn.Parameter], decay: float = 0.9999, min_decay: float = 0.0,
                 update_after_step: int = 0, use_ema_warmup: bool = False, inv_gamma: Union[float, int] = 1.0,
                 power: Union[float, int] = 2 / 3, model_cls: Optional[Any] = None,
                 model_config: Dict[str, Any] = None, **kwargs):
        if isinstance(parameters, torch.nn.Module):
            deprecation_message = (
                "Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. "
                "Please pass the parameters of the module instead."
            )
            deprecate(
                "passing a `torch.nn.Module` to `ExponentialMovingAverage`",
                "1.0.0",
                deprecation_message,
                standard_warn=False,
            )
            parameters = parameters.parameters()

            # set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
            use_ema_warmup = True

        if kwargs.get("max_value", None) is not None:
            deprecation_message = "The `max_value` argument is deprecated. Please use `decay` instead."
            deprecate("max_value", "1.0.0", deprecation_message, standard_warn=False)
            decay = kwargs["max_value"]

        if kwargs.get("min_value", None) is not None:
            deprecation_message = "The `min_value` argument is deprecated. Please use `min_decay` instead."
            deprecate("min_value", "1.0.0", deprecation_message, standard_warn=False)
            min_decay = kwargs["min_value"]

        parameters = list(parameters)
        self.shadow_params = [p.clone().detach() for p in parameters]

        if kwargs.get("device", None) is not None:
            deprecation_message = "The `device` argument is deprecated. Please use `to` instead."
            deprecate("device", "1.0.0", deprecation_message, standard_warn=False)
            self.to(device=kwargs["device"])

        self.temp_stored_params = None

        self.decay = decay
        self.min_decay = min_decay
        self.update_after_step = update_after_step
        self.use_ema_warmup = use_ema_warmup
        self.inv_gamma = inv_gamma
        self.power = power
        self.optimization_step = 0
        self.cur_decay_value = None  # set in `step()`

        self.model_cls = model_cls
        self.model_config = model_config
    @classmethod
    def from_pretrained(cls, path, model_cls) -> "EMAModel":
        _, ema_kwargs = model_cls.load_config(path, return_unused_kwargs=True)
        model = model_cls.from_pretrained(path)
        ema_model = cls(model.parameters(), model_cls=model_cls, model_config=model.config)
        ema_model.load_state_dict(ema_kwargs)
        return ema_model

    def save_pretrained(self, path):
        if self.model_cls is None:
            raise ValueError("`save_pretrained` can only be used if `model_cls` was defined at __init__.")
        if self.model_config is None:
            raise ValueError("`save_pretrained` can only be used if `model_config` was defined at __init__.")
        model = self.model_cls.from_config(self.model_config)
        state_dict = self.state_dict()
        state_dict.pop("shadow_params", None)
        model.register_to_config(**state_dict)
        self.copy_to(model.parameters())
        model.save_pretrained(path)

    def get_decay(self, optimization_step: int) -> float:
        """Compute the decay factor for the exponential moving average."""
        step = max(0, optimization_step - self.update_after_step - 1)
        if step <= 0:
            return 0.0
        if self.use_ema_warmup:
            cur_decay_value = 1 - (1 + step / self.inv_gamma) ** -self.power
        else:
            cur_decay_value = (1 + step) / (10 + step)
        cur_decay_value = min(cur_decay_value, self.decay)
        # make sure decay is not smaller than min_decay
        cur_decay_value = max(cur_decay_value, self.min_decay)
        return cur_decay_value
    @torch.no_grad()
    def step(self, parameters: Iterable[torch.nn.Parameter]):
        if isinstance(parameters, torch.nn.Module):
            deprecation_message = (
                "Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. "
                "Please pass the parameters of the module instead."
            )
            deprecate(
                "passing a `torch.nn.Module` to `ExponentialMovingAverage.step`",
                "1.0.0",
                deprecation_message,
                standard_warn=False,
            )
            parameters = parameters.parameters()

        parameters = list(parameters)
        self.optimization_step += 1

        # Compute the decay factor for the exponential moving average.
        decay = self.get_decay(self.optimization_step)
        self.cur_decay_value = decay
        one_minus_decay = 1 - decay

        context_manager = contextlib.nullcontext
        if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
            import deepspeed

        for s_param, param in zip(self.shadow_params, parameters):
            if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
                context_manager = deepspeed.zero.GatheredParameters(param, modifier_rank=None)

            with context_manager():
                if param.requires_grad:
                    s_param.sub_(one_minus_decay * (s_param - param))
                else:
                    s_param.copy_(param)
    def copy_to(self, parameters: Iterable[torch.nn.Parameter]) -> None:
        """Copy the averaged parameters into the given collection of parameters."""
        parameters = list(parameters)
        for s_param, param in zip(self.shadow_params, parameters):
            param.data.copy_(s_param.to(param.device).data)

    def to(self, device=None, dtype=None) -> None:
        """Move the internal shadow buffers to `device` (and optionally cast floats to `dtype`)."""
        self.shadow_params = [
            p.to(device=device, dtype=dtype) if p.is_floating_point() else p.to(device=device)
            for p in self.shadow_params
        ]

    def state_dict(self) -> dict:
        """Return the state of the EMA as a dict, for checkpointing."""
        return {
            "decay": self.decay,
            "min_decay": self.min_decay,
            "optimization_step": self.optimization_step,
            "update_after_step": self.update_after_step,
            "use_ema_warmup": self.use_ema_warmup,
            "inv_gamma": self.inv_gamma,
            "power": self.power,
            "shadow_params": self.shadow_params,
        }

    def store(self, parameters: Iterable[torch.nn.Parameter]) -> None:
        """Save the current parameters so they can be restored after evaluation."""
        self.temp_stored_params = [param.detach().cpu().clone() for param in parameters]

    def restore(self, parameters: Iterable[torch.nn.Parameter]) -> None:
        """Restore the parameters saved with the `store` method."""
        if self.temp_stored_params is None:
            raise RuntimeError("This ExponentialMovingAverage has no `store()`ed weights to `restore()`")
        for c_param, param in zip(self.temp_stored_params, parameters):
            param.data.copy_(c_param.data)
        # Better memory-wise.
        self.temp_stored_params = None
    def load_state_dict(self, state_dict: dict) -> None:
        """Load and validate an EMA state produced by `state_dict`."""
        # deepcopy, to be consistent with the module API
        state_dict = copy.deepcopy(state_dict)

        self.decay = state_dict.get("decay", self.decay)
        if self.decay < 0.0 or self.decay > 1.0:
            raise ValueError("Decay must be between 0 and 1")

        self.min_decay = state_dict.get("min_decay", self.min_decay)
        if not isinstance(self.min_decay, float):
            raise ValueError("Invalid min_decay")

        self.optimization_step = state_dict.get("optimization_step", self.optimization_step)
        if not isinstance(self.optimization_step, int):
            raise ValueError("Invalid optimization_step")

        self.update_after_step = state_dict.get("update_after_step", self.update_after_step)
        if not isinstance(self.update_after_step, int):
            raise ValueError("Invalid update_after_step")

        self.use_ema_warmup = state_dict.get("use_ema_warmup", self.use_ema_warmup)
        if not isinstance(self.use_ema_warmup, bool):
            raise ValueError("Invalid use_ema_warmup")

        self.inv_gamma = state_dict.get("inv_gamma", self.inv_gamma)
        if not isinstance(self.inv_gamma, (float, int)):
            raise ValueError("Invalid inv_gamma")

        self.power = state_dict.get("power", self.power)
        if not isinstance(self.power, (float, int)):
            raise ValueError("Invalid power")

        shadow_params = state_dict.get("shadow_params", None)
        if shadow_params is not None:
            self.shadow_params = shadow_params
            if not isinstance(self.shadow_params, list):
                raise ValueError("shadow_params must be a list")
            if not all(isinstance(p, torch.Tensor) for p in self.shadow_params):
                raise ValueError("shadow_params must all be Tensors")
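# A minimal usage sketch (illustrative): keep an EMA of a model's weights during training and
# swap them in for evaluation. `train_step`, `evaluate`, `model`, and `dataloader` are
# hypothetical user-side names, not part of this module.
#
#   ema = EMAModel(model.parameters(), decay=0.9999)
#   for batch in dataloader:
#       train_step(model, batch)
#       ema.step(model.parameters())      # update the shadow weights
#   ema.store(model.parameters())         # stash the raw weights
#   ema.copy_to(model.parameters())       # evaluate with the averaged weights
#   evaluate(model)
#   ema.restore(model.parameters())       # resume training from the raw weights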
| 181 | 1 |
"""simple docstring"""
from random import shuffle
import tensorflow as tf
from numpy import array
def tf_k_means_cluster(vectors, noofclusters):
    # Note: this uses the TensorFlow 1.x graph API (tf.Session, tf.placeholder);
    # under TensorFlow 2.x run it via tf.compat.v1 with eager execution disabled.
    noofclusters = int(noofclusters)
    assert noofclusters < len(vectors)

    # Find out the dimensionality
    dim = len(vectors[0])

    # Will help select random centroids from among the available vectors
    vector_indices = list(range(len(vectors)))
    shuffle(vector_indices)

    # GRAPH OF COMPUTATION
    # We initialize a new graph and set it as the default during each run
    # of this algorithm. This ensures that as this function is called
    # multiple times, the default graph doesn't keep getting crowded with
    # unused ops and Variables from previous function calls.
    graph = tf.Graph()

    with graph.as_default():
        # SESSION OF COMPUTATION
        sess = tf.Session()

        ##CONSTRUCTING THE ELEMENTS OF COMPUTATION

        ##First lets ensure we have a Variable vector for each centroid,
        ##initialized to one of the vectors from the available data points
        centroids = [tf.Variable(vectors[vector_indices[i]]) for i in range(noofclusters)]
        ##These nodes will assign the centroid Variables the appropriate
        ##values
        centroid_value = tf.placeholder("float64", [dim])
        cent_assigns = []
        for centroid in centroids:
            cent_assigns.append(tf.assign(centroid, centroid_value))

        ##Variables for cluster assignments of individual vectors(initialized
        ##to 0 at first)
        assignments = [tf.Variable(0) for i in range(len(vectors))]
        ##These nodes will assign an assignment Variable the appropriate
        ##value
        assignment_value = tf.placeholder("int32")
        cluster_assigns = []
        for assignment in assignments:
            cluster_assigns.append(tf.assign(assignment, assignment_value))

        ##Now lets construct the node that will compute the mean
        # The placeholder for the input
        mean_input = tf.placeholder("float", [None, dim])
        # The Node/op takes the input and computes a mean along the 0th
        # dimension, i.e. the list of input vectors
        mean_op = tf.reduce_mean(mean_input, 0)

        ##Node for computing Euclidean distances
        # Placeholders for input
        v1 = tf.placeholder("float", [dim])
        v2 = tf.placeholder("float", [dim])
        euclid_dist = tf.sqrt(tf.reduce_sum(tf.pow(tf.subtract(v1, v2), 2)))

        ##This node will figure out which cluster to assign a vector to,
        ##based on Euclidean distances of the vector from the centroids.
        # Placeholder for input
        centroid_distances = tf.placeholder("float", [noofclusters])
        cluster_assignment = tf.argmin(centroid_distances, 0)

        ##INITIALIZING STATE VARIABLES
        ##This will help initialization of all Variables defined with respect
        ##to the graph. The Variable-initializer should be defined after
        ##all the Variables have been constructed, so that each of them
        ##will be included in the initialization.
        init_op = tf.global_variables_initializer()

        # Initialize all variables
        sess.run(init_op)

        ##CLUSTERING ITERATIONS
        # Now perform the Expectation-Maximization steps of K-Means clustering
        # iterations. To keep things simple, we will only do a set number of
        # iterations, instead of using a Stopping Criterion.
        noofiterations = 100
        for _ in range(noofiterations):
            ##EXPECTATION STEP
            ##Based on the centroid locations till last iteration, compute
            ##the _expected_ centroid assignments.
            # Iterate over each vector
            for vector_n in range(len(vectors)):
                vect = vectors[vector_n]
                # Compute Euclidean distance between this vector and each
                # centroid. Remember that this list cannot be named
                #'centroid_distances', since that is the input to the
                # cluster assignment node.
                distances = [
                    sess.run(euclid_dist, feed_dict={v1: vect, v2: sess.run(centroid)})
                    for centroid in centroids
                ]
                # Now use the cluster assignment node, with the distances
                # as the input
                assignment = sess.run(cluster_assignment, feed_dict={centroid_distances: distances})
                # Now assign the value to the appropriate state variable
                sess.run(cluster_assigns[vector_n], feed_dict={assignment_value: assignment})

            ##MAXIMIZATION STEP
            # Based on the expected state computed from the Expectation Step,
            # compute the locations of the centroids so as to maximize the
            # overall objective of minimizing within-cluster Sum-of-Squares
            for cluster_n in range(noofclusters):
                # Collect all the vectors assigned to this cluster
                assigned_vects = [
                    vectors[i] for i in range(len(vectors)) if sess.run(assignments[i]) == cluster_n
                ]
                # Compute new centroid location
                new_location = sess.run(mean_op, feed_dict={mean_input: array(assigned_vects)})
                # Assign value to appropriate variable
                sess.run(cent_assigns[cluster_n], feed_dict={centroid_value: new_location})

        # Return centroids and assignments
        centroids = sess.run(centroids)
        assignments = sess.run(assignments)
        return centroids, assignments
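# A small usage sketch (illustrative): cluster six 2-D points into two groups. Exact centroid
# values depend on the random initialisation.
#
#   points = array([[1.0, 1.0], [1.2, 0.8], [0.8, 1.1], [8.0, 8.0], [8.2, 7.9], [7.9, 8.3]])
#   centroids, assignments = tf_k_means_cluster(points, 2)
#   print(centroids)     # one centre near (1, 1), one near (8, 8)
#   print(assignments)   # cluster index for each input point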
| 249 |
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config(model_name):
    auxiliary_in_channels = 384
    window_size = 7
    if "tiny" in model_name:
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif "small" in model_name:
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif "base" in model_name:
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
        window_size = 12
        auxiliary_in_channels = 512
    elif "large" in model_name:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
        window_size = 12
        auxiliary_in_channels = 768

    # set label information
    num_labels = 150
    repo_id = "huggingface/label-files"
    filename = "ade20k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    backbone_config = SwinConfig(
        embed_dim=embed_dim,
        depths=depths,
        num_heads=num_heads,
        window_size=window_size,
        out_features=["stage1", "stage2", "stage3", "stage4"],
    )
    config = UperNetConfig(
        backbone_config=backbone_config,
        auxiliary_in_channels=auxiliary_in_channels,
        num_labels=num_labels,
        id2label=id2label,
        label2id=label2id,
    )
    return config
def create_rename_keys(config):
    rename_keys = []
# fmt: off
# stem
rename_keys.append(('''backbone.patch_embed.projection.weight''', '''backbone.embeddings.patch_embeddings.projection.weight''') )
rename_keys.append(('''backbone.patch_embed.projection.bias''', '''backbone.embeddings.patch_embeddings.projection.bias''') )
rename_keys.append(('''backbone.patch_embed.norm.weight''', '''backbone.embeddings.norm.weight''') )
rename_keys.append(('''backbone.patch_embed.norm.bias''', '''backbone.embeddings.norm.bias''') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.norm1.weight""", f"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.norm1.bias""", f"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table""", f"""backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index""", f"""backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight""", f"""backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias""", f"""backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.norm2.weight""", f"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.norm2.bias""", f"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight""", f"""backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias""", f"""backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight""", f"""backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias""", f"""backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias""") )
if i < 3:
rename_keys.append((f"""backbone.stages.{i}.downsample.reduction.weight""", f"""backbone.encoder.layers.{i}.downsample.reduction.weight""") )
rename_keys.append((f"""backbone.stages.{i}.downsample.norm.weight""", f"""backbone.encoder.layers.{i}.downsample.norm.weight""") )
rename_keys.append((f"""backbone.stages.{i}.downsample.norm.bias""", f"""backbone.encoder.layers.{i}.downsample.norm.bias""") )
rename_keys.append((f"""backbone.norm{i}.weight""", f"""backbone.hidden_states_norms.stage{i+1}.weight""") )
rename_keys.append((f"""backbone.norm{i}.bias""", f"""backbone.hidden_states_norms.stage{i+1}.bias""") )
# decode head
rename_keys.extend(
[
('''decode_head.conv_seg.weight''', '''decode_head.classifier.weight'''),
('''decode_head.conv_seg.bias''', '''decode_head.classifier.bias'''),
('''auxiliary_head.conv_seg.weight''', '''auxiliary_head.classifier.weight'''),
('''auxiliary_head.conv_seg.bias''', '''auxiliary_head.classifier.bias'''),
] )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on
def correct_unfold_reduction_order(x):
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel, 4, in_channel // 4)
    x = x[:, [0, 2, 1, 3], :].transpose(1, 2).reshape(out_channel, in_channel)
    return x


def reverse_correct_unfold_reduction_order(x):
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel, in_channel // 4, 4)
    x = x[:, :, [0, 2, 1, 3]].transpose(1, 2).reshape(out_channel, in_channel)
    return x


def correct_unfold_norm_order(x):
    in_channel = x.shape[0]
    x = x.reshape(4, in_channel // 4)
    x = x[[0, 2, 1, 3], :].transpose(0, 1).reshape(in_channel)
    return x


def reverse_correct_unfold_norm_order(x):
    in_channel = x.shape[0]
    x = x.reshape(in_channel // 4, 4)
    x = x[:, [0, 2, 1, 3]].transpose(0, 1).reshape(in_channel)
    return x
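# A quick numeric check (illustrative) of the reordering pair above: the [0, 2, 1, 3]
# permutation swaps the two middle channel groups and is its own inverse, so each
# reverse_* function undoes the matching correct_* function.
#
#   x = torch.arange(8.0)                              # tensor([0., 1., 2., 3., 4., 5., 6., 7.])
#   y = correct_unfold_norm_order(x)                   # tensor([0., 4., 2., 6., 1., 5., 3., 7.])
#   assert torch.equal(reverse_correct_unfold_norm_order(y), x)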
def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    model_name_to_url = {
        "upernet-swin-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth",
        "upernet-swin-small": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth",
        "upernet-swin-base": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth",
        "upernet-swin-large": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth",
    }
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu", file_name=model_name)[
        "state_dict"
    ]

    for name, param in state_dict.items():
        print(name, param.shape)

    config = get_upernet_config(model_name)
    model = UperNetForSemanticSegmentation(config)
    model.eval()

    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if "bn" in key:
            key = key.replace("bn", "batch_norm")
        state_dict[key] = val

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config.backbone_config)

    # fix downsample parameters
    for key, value in state_dict.items():
        if "downsample" in key:
            if "reduction" in key:
                state_dict[key] = reverse_correct_unfold_reduction_order(value)
            if "norm" in key:
                state_dict[key] = reverse_correct_unfold_norm_order(value)

    model.load_state_dict(state_dict)

    # verify on image
    url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")

    processor = SegformerImageProcessor()
    pixel_values = processor(image, return_tensors="pt").pixel_values

    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits

    print(logits.shape)
    print("First values of logits:", logits[0, 0, :3, :3])
    # assert values
    if model_name == "upernet-swin-tiny":
        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]]
        )
    elif model_name == "upernet-swin-small":
        expected_slice = torch.tensor(
            [[-7.1921, -7.1921, -6.9532], [-7.1921, -7.1921, -6.9532], [-7.0908, -7.0908, -6.8534]]
        )
    elif model_name == "upernet-swin-base":
        expected_slice = torch.tensor(
            [[-6.5851, -6.5851, -6.4330], [-6.5851, -6.5851, -6.4330], [-6.4763, -6.4763, -6.3254]]
        )
    elif model_name == "upernet-swin-large":
        expected_slice = torch.tensor(
            [[-7.5297, -7.5297, -7.3802], [-7.5297, -7.5297, -7.3802], [-7.4044, -7.4044, -7.2586]]
        )
    print("Logits:", outputs.logits[0, 0, :3, :3])
    assert torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor for {model_name} to hub")
        model.push_to_hub(f"openmmlab/{model_name}")
        processor.push_to_hub(f"openmmlab/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='upernet-swin-tiny',
type=str,
choices=[F"upernet-swin-{size}" for size in ['tiny', 'small', 'base', 'large']],
help='Name of the Swin + UperNet model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
    args = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 249 | 1 |
def or_gate(input_1: int, input_2: int) -> int:
    """Logical OR gate: returns 1 if at least one input is 1, else 0."""
    return int((input_1, input_2).count(1) != 0)


def test_or_gate() -> None:
    """Check the full two-input truth table."""
assert or_gate(0 , 0 ) == 0
assert or_gate(0 , 1 ) == 1
assert or_gate(1 , 0 ) == 1
assert or_gate(1 , 1 ) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
| 370 |
'''simple docstring'''
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format="%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=os.environ.get("LOGLEVEL", "INFO").upper(),
stream=sys.stdout,
)
logger = logging.getLogger(__name__)

model_dict = {"facebook/bart-base": BartForConditionalGeneration}
tokenizer_dict = {"facebook/bart-base": BartTokenizer}
def parse_args():
    parser = argparse.ArgumentParser(description="Export Bart model + Beam Search to ONNX graph.")
    parser.add_argument(
        "--validation_file", type=str, default=None, help="A csv or a json file containing the validation data."
    )
    parser.add_argument(
        "--max_length", type=int, default=5, help="The maximum total input sequence length after tokenization."
    )
    parser.add_argument(
        "--num_beams",
        type=int,
        default=None,
        help=(
            "Number of beams to use for evaluation. This argument will be "
            "passed to ``model.generate``, which is used during ``evaluate`` and ``predict``."
        ),
    )
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=True,
    )
    parser.add_argument(
        "--config_name", type=str, default=None, help="Pretrained config name or path if not the same as model_name"
    )
    parser.add_argument("--device", type=str, default="cpu", help="Device where the model will be run")
    parser.add_argument("--output_file_path", type=str, default=None, help="Where to store the final ONNX file.")

    args = parser.parse_args()
    return args
def load_model_tokenizer(model_name, device="cpu"):
    huggingface_model = model_dict[model_name].from_pretrained(model_name).to(device)
    tokenizer = tokenizer_dict[model_name].from_pretrained(model_name)

    if model_name in ["facebook/bart-base"]:
        huggingface_model.config.no_repeat_ngram_size = 0
        huggingface_model.config.forced_bos_token_id = None
        huggingface_model.config.min_length = 0

    return huggingface_model, tokenizer
def export_and_validate_model(model, tokenizer, onnx_file_path, num_beams, max_length):
    model.eval()

    ort_sess = None
    bart_script_model = torch.jit.script(BARTBeamSearchGenerator(model))

    with torch.no_grad():
        ARTICLE_TO_SUMMARIZE = "My friends are cool but they eat too many carbs."
        inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors="pt").to(model.device)

        summary_ids = model.generate(
            inputs["input_ids"],
            attention_mask=inputs["attention_mask"],
            num_beams=num_beams,
            max_length=max_length,
            early_stopping=True,
            decoder_start_token_id=model.config.decoder_start_token_id,
        )

        torch.onnx.export(
            bart_script_model,
            (
                inputs["input_ids"],
                inputs["attention_mask"],
                num_beams,
                max_length,
                model.config.decoder_start_token_id,
            ),
            onnx_file_path,
            opset_version=14,
            input_names=["input_ids", "attention_mask", "num_beams", "max_length", "decoder_start_token_id"],
            output_names=["output_ids"],
            dynamic_axes={
                "input_ids": {0: "batch", 1: "seq"},
                "output_ids": {0: "batch", 1: "seq_out"},
            },
            example_outputs=summary_ids,
        )

        logger.info("Model exported to {}".format(onnx_file_path))

        new_onnx_file_path = remove_dup_initializers(os.path.abspath(onnx_file_path))

        logger.info("Deduplicated and optimized model written to {}".format(new_onnx_file_path))

        ort_sess = onnxruntime.InferenceSession(new_onnx_file_path)
        ort_out = ort_sess.run(
            None,
            {
                "input_ids": inputs["input_ids"].cpu().numpy(),
                "attention_mask": inputs["attention_mask"].cpu().numpy(),
                "num_beams": np.array(num_beams),
                "max_length": np.array(max_length),
                "decoder_start_token_id": np.array(model.config.decoder_start_token_id),
            },
        )

        np.testing.assert_allclose(summary_ids.cpu().numpy(), ort_out[0], rtol=1e-3, atol=1e-3)

        logger.info("Model outputs from torch and ONNX Runtime are similar.")
        logger.info("Success.")
def main():
    args = parse_args()
    max_length = 5
    num_beams = 4

    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )

    logger.setLevel(logging.INFO)
    transformers.utils.logging.set_verbosity_error()

    device = torch.device(args.device)

    model, tokenizer = load_model_tokenizer(args.model_name_or_path, device)
    if model.config.decoder_start_token_id is None:
        raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined")

    model.to(device)

    if args.max_length:
        max_length = args.max_length
    if args.num_beams:
        num_beams = args.num_beams
    if args.output_file_path:
        output_name = args.output_file_path
    else:
        output_name = "BART.onnx"

    logger.info("Exporting model to ONNX")
    export_and_validate_model(model, tokenizer, output_name, num_beams, max_length)
if __name__ == "__main__":
main()
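# Example invocation (illustrative; the script name is a placeholder):
#
#   python run_onnx_exporter.py --model_name_or_path facebook/bart-base \
#       --num_beams 4 --max_length 5 --output_file_path BART.onnx --device cpu
#
# The script traces beam search with torch.jit.script, exports it at opset 14, and then
# verifies that ONNX Runtime reproduces the PyTorch `generate` output within 1e-3.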
| 92 | 0 |
"""simple docstring"""
def greatest_common_divisor(a: int, b: int) -> int:
    """Recursive Euclidean algorithm."""
    return abs(b) if a == 0 else greatest_common_divisor(b % a, a)


def gcd_by_iterative(x: int, y: int) -> int:
    """Iterative Euclidean algorithm."""
    while y:  # --> when y=0 then loop will terminate and return x as final GCD.
        x, y = y, x % y
    return abs(x)
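# Worked example of the Euclidean algorithm implemented by both functions above:
#   gcd(48, 18): 48 % 18 = 12 -> 18 % 12 = 6 -> 12 % 6 = 0, so the GCD is 6.
#   greatest_common_divisor(48, 18) == gcd_by_iterative(48, 18) == 6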
def main():
    """Call the GCD functions on user input."""
    try:
        nums = input("Enter two integers separated by comma (,): ").split(",")
        num_1 = int(nums[0])
        num_2 = int(nums[1])
        print(
            f"greatest_common_divisor({num_1}, {num_2}) = "
            f"{greatest_common_divisor(num_1, num_2)}"
        )
        print(f"By iterative gcd({num_1}, {num_2}) = {gcd_by_iterative(num_1, num_2)}")
except (IndexError, UnboundLocalError, ValueError):
print('Wrong input' )
if __name__ == "__main__":
main()
| 77 |
import json
import os
import tempfile
from transformers.testing_utils import check_json_file_has_correct_format
class FeatureExtractionSavingTestMixin:
    feature_extraction_class = None

    def test_feat_extract_to_json_string(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        obj = json.loads(feat_extract.to_json_string())
        for key, value in self.feat_extract_dict.items():
            self.assertEqual(obj[key], value)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())

    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())

    def test_init_without_params(self):
        feat_extract = self.feature_extraction_class()
        self.assertIsNotNone(feat_extract)
| 226 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase_ : Tuple = logging.get_logger(__name__)
lowerCamelCase_ : Dict = {
'''RWKV/rwkv-4-169m-pile''': '''https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-430m-pile''': '''https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-1b5-pile''': '''https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-3b-pile''': '''https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-7b-pile''': '''https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-14b-pile''': '''https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json''',
'''RWKV/rwkv-raven-1b5''': '''https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json''',
'''RWKV/rwkv-raven-3b''': '''https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json''',
'''RWKV/rwkv-raven-7b''': '''https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json''',
'''RWKV/rwkv-raven-14b''': '''https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json''',
}
class RwkvConfig(PretrainedConfig):
    model_type = "rwkv"
    attribute_map = {"max_position_embeddings": "context_length"}

    def __init__(
        self,
        vocab_size=50277,
        context_length=1024,
        hidden_size=4096,
        num_hidden_layers=32,
        attention_hidden_size=None,
        intermediate_size=None,
        layer_norm_epsilon=1e-5,
        bos_token_id=0,
        eos_token_id=0,
        rescale_every=6,
        tie_word_embeddings=False,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.context_length = context_length
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.attention_hidden_size = attention_hidden_size if attention_hidden_size is not None else hidden_size
        self.intermediate_size = intermediate_size if intermediate_size is not None else 4 * hidden_size
        self.layer_norm_epsilon = layer_norm_epsilon
        self.rescale_every = rescale_every
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            tie_word_embeddings=tie_word_embeddings, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs
        )
| 361 |
import numpy as np
def tangent_hyperbolic(vector: np.ndarray) -> np.ndarray:
    """Return tanh(vector), computed via the closed form tanh(x) = 2 / (1 + e^(-2x)) - 1."""
    return (2 / (1 + np.exp(-2 * vector))) - 1
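# Sanity check (illustrative): the closed form above agrees with numpy's built-in tanh.
#
#   v = np.array([-1.0, 0.0, 1.0])
#   np.testing.assert_allclose(tangent_hyperbolic(v), np.tanh(v))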
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 34 | 0 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
__lowerCamelCase : List[str] = logging.get_logger(__name__)
class a__(BaseImageProcessor):
    model_input_names = ["pixel_values"]
def __init__( self : Union[str, Any],_A : bool = True,_A : Optional[Dict[str, int]] = None,_A : PILImageResampling = PILImageResampling.BICUBIC,_A : bool = True,_A : bool = True,_A : Union[int, float] = 1 / 255,_A : Dict[str, int] = None,_A : bool = True,_A : Optional[Union[float, List[float]]] = None,_A : Optional[Union[float, List[float]]] = None,**_A : int,):
"""simple docstring"""
super().__init__(**_UpperCAmelCase )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = size if size is not None else {'height': 224, 'width': 224}
SCREAMING_SNAKE_CASE_ : Optional[int] = get_size_dict(_UpperCAmelCase )
SCREAMING_SNAKE_CASE_ : int = crop_size if crop_size is not None else {'height': 224, 'width': 224}
SCREAMING_SNAKE_CASE_ : int = get_size_dict(_UpperCAmelCase,default_to_square=_UpperCAmelCase,param_name="crop_size" )
SCREAMING_SNAKE_CASE_ : Any = do_resize
SCREAMING_SNAKE_CASE_ : str = do_rescale
SCREAMING_SNAKE_CASE_ : Optional[Any] = do_normalize
SCREAMING_SNAKE_CASE_ : Optional[Any] = do_center_crop
SCREAMING_SNAKE_CASE_ : Any = crop_size
SCREAMING_SNAKE_CASE_ : Optional[Any] = size
SCREAMING_SNAKE_CASE_ : Optional[int] = resample
SCREAMING_SNAKE_CASE_ : str = rescale_factor
SCREAMING_SNAKE_CASE_ : Optional[int] = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
SCREAMING_SNAKE_CASE_ : int = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def __UpperCamelCase ( self : List[str],_A : np.ndarray,_A : Dict[str, int],_A : PILImageResampling = PILImageResampling.BILINEAR,_A : Optional[Union[str, ChannelDimension]] = None,**_A : Optional[Any],):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = get_size_dict(_UpperCAmelCase )
if "shortest_edge" in size:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = get_resize_output_image_size(_UpperCAmelCase,size=size["shortest_edge"],default_to_square=_UpperCAmelCase )
# size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
elif "height" in size and "width" in size:
SCREAMING_SNAKE_CASE_ : str = (size['height'], size['width'])
else:
raise ValueError(F'Size must contain \'height\' and \'width\' keys or \'shortest_edge\' key. Got {size.keys()}' )
return resize(_UpperCAmelCase,size=_UpperCAmelCase,resample=_UpperCAmelCase,data_format=_UpperCAmelCase,**_UpperCAmelCase )
def __UpperCamelCase ( self : List[str],_A : np.ndarray,_A : Dict[str, int],_A : Optional[Union[str, ChannelDimension]] = None,**_A : Any,):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = get_size_dict(_UpperCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(F'The `size` parameter must contain the keys (height, width). Got {size.keys()}' )
return center_crop(_UpperCAmelCase,size=(size["height"], size["width"]),data_format=_UpperCAmelCase,**_UpperCAmelCase )
def __UpperCamelCase ( self : Optional[Any],_A : np.ndarray,_A : float,_A : Optional[Union[str, ChannelDimension]] = None,**_A : str ):
"""simple docstring"""
return rescale(_UpperCAmelCase,scale=_UpperCAmelCase,data_format=_UpperCAmelCase,**_UpperCAmelCase )
def __UpperCamelCase ( self : List[Any],_A : np.ndarray,_A : Union[float, List[float]],_A : Union[float, List[float]],_A : Optional[Union[str, ChannelDimension]] = None,**_A : List[Any],):
"""simple docstring"""
return normalize(_UpperCAmelCase,mean=_UpperCAmelCase,std=_UpperCAmelCase,data_format=_UpperCAmelCase,**_UpperCAmelCase )
def __UpperCamelCase ( self : Optional[int],_A : ImageInput,_A : Optional[bool] = None,_A : Dict[str, int] = None,_A : PILImageResampling = None,_A : bool = None,_A : int = None,_A : Optional[bool] = None,_A : Optional[float] = None,_A : Optional[bool] = None,_A : Optional[Union[float, List[float]]] = None,_A : Optional[Union[float, List[float]]] = None,_A : Optional[Union[str, TensorType]] = None,_A : Union[str, ChannelDimension] = ChannelDimension.FIRST,**_A : Any,):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = do_resize if do_resize is not None else self.do_resize
SCREAMING_SNAKE_CASE_ : Optional[Any] = do_rescale if do_rescale is not None else self.do_rescale
SCREAMING_SNAKE_CASE_ : Optional[int] = do_normalize if do_normalize is not None else self.do_normalize
SCREAMING_SNAKE_CASE_ : int = do_center_crop if do_center_crop is not None else self.do_center_crop
SCREAMING_SNAKE_CASE_ : Dict = crop_size if crop_size is not None else self.crop_size
SCREAMING_SNAKE_CASE_ : Union[str, Any] = get_size_dict(_UpperCAmelCase,param_name="crop_size",default_to_square=_UpperCAmelCase )
SCREAMING_SNAKE_CASE_ : str = resample if resample is not None else self.resample
SCREAMING_SNAKE_CASE_ : Optional[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
SCREAMING_SNAKE_CASE_ : Optional[Any] = image_mean if image_mean is not None else self.image_mean
SCREAMING_SNAKE_CASE_ : Optional[Any] = image_std if image_std is not None else self.image_std
SCREAMING_SNAKE_CASE_ : Union[str, Any] = size if size is not None else self.size
SCREAMING_SNAKE_CASE_ : Optional[int] = get_size_dict(_UpperCAmelCase )
if not is_batched(_UpperCAmelCase ):
SCREAMING_SNAKE_CASE_ : Tuple = [images]
if not valid_images(_UpperCAmelCase ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE_ : int = [to_numpy_array(_UpperCAmelCase ) for image in images]
if do_resize:
SCREAMING_SNAKE_CASE_ : Dict = [self.resize(image=_UpperCAmelCase,size=_UpperCAmelCase,resample=_UpperCAmelCase ) for image in images]
if do_center_crop:
SCREAMING_SNAKE_CASE_ : Tuple = [self.center_crop(image=_UpperCAmelCase,size=_UpperCAmelCase ) for image in images]
if do_rescale:
SCREAMING_SNAKE_CASE_ : Optional[Any] = [self.rescale(image=_UpperCAmelCase,scale=_UpperCAmelCase ) for image in images]
if do_normalize:
SCREAMING_SNAKE_CASE_ : str = [self.normalize(image=_UpperCAmelCase,mean=_UpperCAmelCase,std=_UpperCAmelCase ) for image in images]
SCREAMING_SNAKE_CASE_ : Optional[Any] = [to_channel_dimension_format(_UpperCAmelCase,_UpperCAmelCase ) for image in images]
SCREAMING_SNAKE_CASE_ : Optional[Any] = {'pixel_values': images}
return BatchFeature(data=_UpperCAmelCase,tensor_type=_UpperCAmelCase )
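# A usage sketch (illustrative; assumes the methods above carry their conventional
# BaseImageProcessor names so that calling the processor dispatches to `preprocess`):
#
#   processor = a__(size={"height": 224, "width": 224})
#   dummy = np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8)
#   batch = processor(dummy, return_tensors="np")
#   print(batch["pixel_values"].shape)   # (1, 3, 224, 224) after resize + center crop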
| 18 |
'''simple docstring'''
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = OrderedDict(
[
('''align''', '''EfficientNetImageProcessor'''),
('''beit''', '''BeitImageProcessor'''),
('''bit''', '''BitImageProcessor'''),
('''blip''', '''BlipImageProcessor'''),
('''blip-2''', '''BlipImageProcessor'''),
('''bridgetower''', '''BridgeTowerImageProcessor'''),
('''chinese_clip''', '''ChineseCLIPImageProcessor'''),
('''clip''', '''CLIPImageProcessor'''),
('''clipseg''', '''ViTImageProcessor'''),
('''conditional_detr''', '''ConditionalDetrImageProcessor'''),
('''convnext''', '''ConvNextImageProcessor'''),
('''convnextv2''', '''ConvNextImageProcessor'''),
('''cvt''', '''ConvNextImageProcessor'''),
('''data2vec-vision''', '''BeitImageProcessor'''),
('''deformable_detr''', '''DeformableDetrImageProcessor'''),
('''deit''', '''DeiTImageProcessor'''),
('''deta''', '''DetaImageProcessor'''),
('''detr''', '''DetrImageProcessor'''),
('''dinat''', '''ViTImageProcessor'''),
('''donut-swin''', '''DonutImageProcessor'''),
('''dpt''', '''DPTImageProcessor'''),
('''efficientformer''', '''EfficientFormerImageProcessor'''),
('''efficientnet''', '''EfficientNetImageProcessor'''),
('''flava''', '''FlavaImageProcessor'''),
('''focalnet''', '''BitImageProcessor'''),
('''git''', '''CLIPImageProcessor'''),
('''glpn''', '''GLPNImageProcessor'''),
('''groupvit''', '''CLIPImageProcessor'''),
('''imagegpt''', '''ImageGPTImageProcessor'''),
('''instructblip''', '''BlipImageProcessor'''),
('''layoutlmv2''', '''LayoutLMv2ImageProcessor'''),
('''layoutlmv3''', '''LayoutLMv3ImageProcessor'''),
('''levit''', '''LevitImageProcessor'''),
('''mask2former''', '''Mask2FormerImageProcessor'''),
('''maskformer''', '''MaskFormerImageProcessor'''),
('''mgp-str''', '''ViTImageProcessor'''),
('''mobilenet_v1''', '''MobileNetV1ImageProcessor'''),
('''mobilenet_v2''', '''MobileNetV2ImageProcessor'''),
        ('''mobilevit''', '''MobileViTImageProcessor'''),
('''mobilevitv2''', '''MobileViTImageProcessor'''),
('''nat''', '''ViTImageProcessor'''),
('''oneformer''', '''OneFormerImageProcessor'''),
('''owlvit''', '''OwlViTImageProcessor'''),
('''perceiver''', '''PerceiverImageProcessor'''),
('''pix2struct''', '''Pix2StructImageProcessor'''),
('''poolformer''', '''PoolFormerImageProcessor'''),
('''regnet''', '''ConvNextImageProcessor'''),
('''resnet''', '''ConvNextImageProcessor'''),
('''sam''', '''SamImageProcessor'''),
('''segformer''', '''SegformerImageProcessor'''),
('''swiftformer''', '''ViTImageProcessor'''),
('''swin''', '''ViTImageProcessor'''),
('''swin2sr''', '''Swin2SRImageProcessor'''),
('''swinv2''', '''ViTImageProcessor'''),
('''table-transformer''', '''DetrImageProcessor'''),
('''timesformer''', '''VideoMAEImageProcessor'''),
('''tvlt''', '''TvltImageProcessor'''),
('''upernet''', '''SegformerImageProcessor'''),
('''van''', '''ConvNextImageProcessor'''),
('''videomae''', '''VideoMAEImageProcessor'''),
('''vilt''', '''ViltImageProcessor'''),
('''vit''', '''ViTImageProcessor'''),
('''vit_hybrid''', '''ViTHybridImageProcessor'''),
('''vit_mae''', '''ViTImageProcessor'''),
('''vit_msn''', '''ViTImageProcessor'''),
('''xclip''', '''CLIPImageProcessor'''),
('''yolos''', '''YolosImageProcessor'''),
]
)
__lowerCAmelCase = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)
def image_processor_class_from_name(class_name: str):
    for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)

            module = importlib.import_module(f".{module_name}", "transformers.models")
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue

    for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
        if getattr(extractor, "__name__", None) == class_name:
            return extractor

    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("transformers")
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)

    return None
def get_image_processor_config(
    pretrained_model_name_or_path,
    cache_dir=None,
    force_download=False,
    resume_download=False,
    proxies=None,
    use_auth_token=None,
    revision=None,
    local_files_only=False,
    **kwargs,
):
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path,
        IMAGE_PROCESSOR_NAME,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    if resolved_config_file is None:
        logger.info(
            "Could not locate the image processor configuration file, will try to use the model config instead."
        )
        return {}

    with open(resolved_config_file, encoding="utf-8") as reader:
        return json.load(reader)
class AutoImageProcessor:
def __init__( self : List[str] ):
raise EnvironmentError(
'AutoImageProcessor is designed to be instantiated '
'using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method.' )
@classmethod
@replace_list_option_in_docstrings(_UpperCAmelCase )
def __lowercase ( cls : Dict ,_UpperCAmelCase : Union[str, Any] ,**_UpperCAmelCase : Optional[Any] ):
_a : Any = kwargs.pop('config' ,_UpperCAmelCase )
_a : Dict = kwargs.pop('trust_remote_code' ,_UpperCAmelCase )
_a : Any = True
_a , _a : Tuple = ImageProcessingMixin.get_image_processor_dict(_UpperCAmelCase ,**_UpperCAmelCase )
_a : List[Any] = config_dict.get('image_processor_type' ,_UpperCAmelCase )
_a : int = None
if "AutoImageProcessor" in config_dict.get('auto_map' ,{} ):
_a : Any = config_dict['auto_map']['AutoImageProcessor']
# If we still don't have the image processor class, check if we're loading from a previous feature extractor config
# and if so, infer the image processor class from there.
if image_processor_class is None and image_processor_auto_map is None:
_a : List[Any] = config_dict.pop('feature_extractor_type' ,_UpperCAmelCase )
if feature_extractor_class is not None:
logger.warning(
'Could not find image processor class in the image processor config or the model config. Loading'
' based on pattern matching with the model\'s feature extractor configuration.' )
_a : Optional[int] = feature_extractor_class.replace('FeatureExtractor' ,'ImageProcessor' )
if "AutoFeatureExtractor" in config_dict.get('auto_map' ,{} ):
_a : List[Any] = config_dict['auto_map']['AutoFeatureExtractor']
_a : List[str] = feature_extractor_auto_map.replace('FeatureExtractor' ,'ImageProcessor' )
logger.warning(
'Could not find image processor auto map in the image processor config or the model config.'
' Loading based on pattern matching with the model\'s feature extractor configuration.' )
# If we don't find the image processor class in the image processor config, let's try the model config.
if image_processor_class is None and image_processor_auto_map is None:
if not isinstance(_UpperCAmelCase ,_UpperCAmelCase ):
_a : Dict = AutoConfig.from_pretrained(_UpperCAmelCase ,**_UpperCAmelCase )
# It could be in `config.image_processor_type``
_a : Optional[int] = getattr(_UpperCAmelCase ,'image_processor_type' ,_UpperCAmelCase )
if hasattr(_UpperCAmelCase ,'auto_map' ) and "AutoImageProcessor" in config.auto_map:
_a : Union[str, Any] = config.auto_map['AutoImageProcessor']
if image_processor_class is not None:
_a : Optional[int] = image_processor_class_from_name(_UpperCAmelCase )
_a : List[str] = image_processor_auto_map is not None
_a : Optional[int] = image_processor_class is not None or type(_UpperCAmelCase ) in IMAGE_PROCESSOR_MAPPING
_a : Optional[int] = resolve_trust_remote_code(
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase )
if has_remote_code and trust_remote_code:
_a : Dict = get_class_from_dynamic_module(
_UpperCAmelCase ,_UpperCAmelCase ,**_UpperCAmelCase )
_a : int = kwargs.pop('code_revision' ,_UpperCAmelCase )
if os.path.isdir(_UpperCAmelCase ):
image_processor_class.register_for_auto_class()
return image_processor_class.from_dict(_UpperCAmelCase ,**_UpperCAmelCase )
elif image_processor_class is not None:
return image_processor_class.from_dict(_UpperCAmelCase ,**_UpperCAmelCase )
# Last try: we use the IMAGE_PROCESSOR_MAPPING.
elif type(_UpperCAmelCase ) in IMAGE_PROCESSOR_MAPPING:
_a : Dict = IMAGE_PROCESSOR_MAPPING[type(_UpperCAmelCase )]
return image_processor_class.from_dict(_UpperCAmelCase ,**_UpperCAmelCase )
raise ValueError(
F"""Unrecognized image processor in {pretrained_model_name_or_path}. Should have a """
F"""`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following """
F"""`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys() )}""" )
    @staticmethod
    def register(config_class, image_processor_class):
        IMAGE_PROCESSOR_MAPPING.register(config_class, image_processor_class)
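# A typical usage sketch (illustrative; the checkpoint name is a placeholder):
#
#   processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224")
#   inputs = processor(images=image, return_tensors="pt")   # `image`: a PIL.Image or numpy array
#
# Resolution order implemented above: the image processor config's own class, a remote
# `auto_map` entry (when trust_remote_code is allowed), the model config, and finally
# the IMAGE_PROCESSOR_MAPPING fallback.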
| 89 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase_ : Dict = logging.get_logger(__name__)
UpperCAmelCase_ : int = {
"""google/mobilenet_v2_1.4_224""": """https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json""",
"""google/mobilenet_v2_1.0_224""": """https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json""",
"""google/mobilenet_v2_0.75_160""": """https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json""",
"""google/mobilenet_v2_0.35_96""": """https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json""",
# See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class MobileNetV2Config(PretrainedConfig):
    model_type = "mobilenet_v2"

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        depth_divisible_by=8,
        min_depth=8,
        expand_ratio=6,
        output_stride=32,
        first_layer_is_expansion=True,
        finegrained_output=True,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.8,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")

        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class MobileNetV2OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict([("pixel_values", {0: "batch"})])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
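# A minimal sketch (illustrative; assumes OnnxConfig's standard `(config, task)` constructor):
#
#   config = MobileNetV2Config(depth_multiplier=1.0, image_size=224)
#   onnx_config = MobileNetV2OnnxConfig(config)
#   print(onnx_config.inputs)               # OrderedDict([('pixel_values', {0: 'batch'})])
#   print(onnx_config.atol_for_validation)  # 0.0001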
| 318 |
"""simple docstring"""
from __future__ import annotations
import queue
class TreeNode:
    def __init__(self, data: int) -> None:
        self.data = data
        self.left: TreeNode | None = None
        self.right: TreeNode | None = None
def build_tree() -> TreeNode:
    print("\n********Press N to stop entering at any point of time********\n")
    check = input("Enter the value of the root node: ").strip().lower()
    q: queue.Queue = queue.Queue()
    tree_node = TreeNode(int(check))
    q.put(tree_node)
    while not q.empty():
        node_found = q.get()
        msg = f"Enter the left node of {node_found.data}: "
        check = input(msg).strip().lower() or "n"
        if check == "n":
            return tree_node
        left_node = TreeNode(int(check))
        node_found.left = left_node
        q.put(left_node)
        msg = f"Enter the right node of {node_found.data}: "
        check = input(msg).strip().lower() or "n"
        if check == "n":
            return tree_node
        right_node = TreeNode(int(check))
        node_found.right = right_node
        q.put(right_node)
    raise ValueError("Tree construction ended unexpectedly")
def pre_order(node: TreeNode) -> None:
    """Pre-order traversal: root, left, right."""
    if not isinstance(node, TreeNode) or not node:
        return
    print(node.data, end=",")
    pre_order(node.left)
    pre_order(node.right)


def in_order(node: TreeNode) -> None:
    """In-order traversal: left, root, right."""
    if not isinstance(node, TreeNode) or not node:
        return
    in_order(node.left)
    print(node.data, end=",")
    in_order(node.right)


def post_order(node: TreeNode) -> None:
    """Post-order traversal: left, right, root."""
    if not isinstance(node, TreeNode) or not node:
        return
    post_order(node.left)
    post_order(node.right)
    print(node.data, end=",")
def level_order(node: TreeNode) -> None:
    """Breadth-first traversal using a FIFO queue."""
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        node_dequeued = q.get()
        print(node_dequeued.data, end=",")
        if node_dequeued.left:
            q.put(node_dequeued.left)
        if node_dequeued.right:
            q.put(node_dequeued.right)


def level_order_actual(node: TreeNode) -> None:
    """Breadth-first traversal printing one tree level per line."""
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        list_ = []
        while not q.empty():
            node_dequeued = q.get()
            print(node_dequeued.data, end=",")
            if node_dequeued.left:
                list_.append(node_dequeued.left)
            if node_dequeued.right:
                list_.append(node_dequeued.right)
        print()
        for node in list_:
            q.put(node)
def pre_order_iter(node: TreeNode) -> None:
    """Iterative pre-order traversal using an explicit stack."""
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:  # start from root node, find its left child
            print(n.data, end=",")
            stack.append(n)
            n = n.left
        # end of while means current node doesn't have left child
        n = stack.pop()
        # start to traverse its right child
        n = n.right


def in_order_iter(node: TreeNode) -> None:
    """Iterative in-order traversal using an explicit stack."""
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:
            stack.append(n)
            n = n.left
        n = stack.pop()
        print(n.data, end=",")
        n = n.right


def post_order_iter(node: TreeNode) -> None:
    """Iterative post-order traversal using two stacks."""
    if not isinstance(node, TreeNode) or not node:
        return
    stack1, stack2 = [], []
    n = node
    stack1.append(n)
    while stack1:  # to find the reversed order of post order, store it in stack2
        n = stack1.pop()
        if n.left:
            stack1.append(n.left)
        if n.right:
            stack1.append(n.right)
        stack2.append(n)
    while stack2:  # pop up from stack2 will be the post order
        print(stack2.pop().data, end=",")
def _A (__a = "" , __a=50 , __a="*" ) -> str:
"""simple docstring"""
if not s:
return "\n" + width * char
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Dict = divmod(width - len(__a ) - 2 , 2 )
return f'{left * char} {s} {(left + extra) * char}'
if __name__ == "__main__":
import doctest
doctest.testmod()
print(prompt("""Binary Tree Traversals"""))
    node: TreeNode = build_tree()
print(prompt("""Pre Order Traversal"""))
pre_order(node)
print(prompt() + """\n""")
print(prompt("""In Order Traversal"""))
in_order(node)
print(prompt() + """\n""")
print(prompt("""Post Order Traversal"""))
post_order(node)
print(prompt() + """\n""")
print(prompt("""Level Order Traversal"""))
level_order(node)
print(prompt() + """\n""")
print(prompt("""Actual Level Order Traversal"""))
level_order_actual(node)
print("""*""" * 50 + """\n""")
print(prompt("""Pre Order Traversal - Iteration Version"""))
pre_order_iter(node)
print(prompt() + """\n""")
print(prompt("""In Order Traversal - Iteration Version"""))
in_order_iter(node)
print(prompt() + """\n""")
print(prompt("""Post Order Traversal - Iteration Version"""))
post_order_iter(node)
print(prompt())
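    # Non-interactive sketch (my addition): build a tiny tree by hand and run
    # the same helpers without the prompts above.
    demo_root = TreeNode(1)
    demo_root.left, demo_root.right = TreeNode(2), TreeNode(3)
    pre_order(demo_root)  # prints: 1,2,3,
    print()
    in_order(demo_root)  # prints: 2,1,3,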
| 318 | 1 |
"""simple docstring"""
import torch
from transformers import CamembertForMaskedLM, CamembertTokenizer
def fill_mask(masked_input, model, tokenizer, topk=5):
    # Adapted from https://github.com/pytorch/fairseq/blob/master/fairseq/models/roberta/hub_interface.py
    assert masked_input.count("<mask>") == 1
    input_ids = torch.tensor(tokenizer.encode(masked_input, add_special_tokens=True)).unsqueeze(0)  # Batch size 1
    logits = model(input_ids)[0]  # The last hidden-state is the first element of the output tuple
    masked_index = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
    logits = logits[0, masked_index, :]
    prob = logits.softmax(dim=0)
    values, indices = prob.topk(k=topk, dim=0)
    topk_predicted_token_bpe = " ".join(
        [tokenizer.convert_ids_to_tokens(indices[i].item()) for i in range(len(indices))]
    )
    masked_token = tokenizer.mask_token
    topk_filled_outputs = []
    for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(" ")):
        predicted_token = predicted_token_bpe.replace("\u2581", " ")
        if " {0}".format(masked_token) in masked_input:
            topk_filled_outputs.append(
                (
                    masked_input.replace(" {0}".format(masked_token), predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
        else:
            topk_filled_outputs.append(
                (
                    masked_input.replace(masked_token, predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
    return topk_filled_outputs
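# Alternative sketch (my addition): the high-level pipeline API wraps the same
# masked-LM decoding, e.g.
#
#   from transformers import pipeline
#   camembert_fill_mask = pipeline("fill-mask", model="camembert-base")
#   camembert_fill_mask("Le camembert est <mask> :)")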
tokenizer = CamembertTokenizer.from_pretrained("camembert-base")
model = CamembertForMaskedLM.from_pretrained("camembert-base")
model.eval()
masked_input = "Le camembert est <mask> :)"
print(fill_mask(masked_input, model, tokenizer, topk=3))
| 126 |
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class A_ :
"""simple docstring"""
def __init__( self :str , lowerCamelCase_ :int , lowerCamelCase_ :List[str]=13 , lowerCamelCase_ :List[Any]=7 , lowerCamelCase_ :str=True , lowerCamelCase_ :int=True , lowerCamelCase_ :List[str]=True , lowerCamelCase_ :Any=99 , lowerCamelCase_ :Optional[int]=32 , lowerCamelCase_ :Dict=5 , lowerCamelCase_ :Any=4 , lowerCamelCase_ :Tuple=37 , lowerCamelCase_ :Union[str, Any]="gelu" , lowerCamelCase_ :Dict=0.1 , lowerCamelCase_ :Any=0.1 , lowerCamelCase_ :List[str]=512 , lowerCamelCase_ :int=16 , lowerCamelCase_ :List[str]=2 , lowerCamelCase_ :List[str]=0.02 , lowerCamelCase_ :List[Any]=3 , lowerCamelCase_ :Dict=4 , lowerCamelCase_ :Optional[Any]=None , ):
"""simple docstring"""
lowerCamelCase__ : Any =parent
lowerCamelCase__ : Union[str, Any] =batch_size
lowerCamelCase__ : Dict =seq_length
lowerCamelCase__ : List[str] =is_training
lowerCamelCase__ : List[Any] =use_token_type_ids
lowerCamelCase__ : Union[str, Any] =use_labels
lowerCamelCase__ : Optional[Any] =vocab_size
lowerCamelCase__ : List[Any] =hidden_size
lowerCamelCase__ : Optional[int] =num_hidden_layers
lowerCamelCase__ : Tuple =num_attention_heads
lowerCamelCase__ : Optional[Any] =intermediate_size
lowerCamelCase__ : Optional[int] =hidden_act
lowerCamelCase__ : List[Any] =hidden_dropout_prob
lowerCamelCase__ : str =attention_probs_dropout_prob
lowerCamelCase__ : Tuple =max_position_embeddings
lowerCamelCase__ : Union[str, Any] =type_vocab_size
lowerCamelCase__ : Dict =type_sequence_label_size
lowerCamelCase__ : str =initializer_range
lowerCamelCase__ : Any =num_labels
lowerCamelCase__ : int =num_choices
lowerCamelCase__ : List[str] =scope
lowerCamelCase__ : List[str] =self.vocab_size - 1
def UpperCAmelCase__ ( self :Any ):
"""simple docstring"""
lowerCamelCase__ : str =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase__ : Union[str, Any] =None
if self.use_token_type_ids:
lowerCamelCase__ : Optional[int] =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCamelCase__ : Any =None
lowerCamelCase__ : Any =None
lowerCamelCase__ : str =None
if self.use_labels:
lowerCamelCase__ : int =ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase__ : List[Any] =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCamelCase__ : Any =ids_tensor([self.batch_size] , self.num_choices )
lowerCamelCase__ : int =OpenAIGPTConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
lowerCamelCase__ : List[str] =ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
def UpperCAmelCase__ ( self :List[str] , lowerCamelCase_ :List[Any] , lowerCamelCase_ :List[str] , lowerCamelCase_ :List[str] , lowerCamelCase_ :Optional[int] , *lowerCamelCase_ :Any ):
"""simple docstring"""
lowerCamelCase__ : Any =OpenAIGPTModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : int =model(lowerCamelCase_ , token_type_ids=lowerCamelCase_ , head_mask=lowerCamelCase_ )
lowerCamelCase__ : Optional[int] =model(lowerCamelCase_ , token_type_ids=lowerCamelCase_ )
lowerCamelCase__ : Union[str, Any] =model(lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase__ ( self :List[str] , lowerCamelCase_ :str , lowerCamelCase_ :Any , lowerCamelCase_ :Dict , lowerCamelCase_ :List[str] , *lowerCamelCase_ :List[Any] ):
"""simple docstring"""
lowerCamelCase__ : int =OpenAIGPTLMHeadModel(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : int =model(lowerCamelCase_ , token_type_ids=lowerCamelCase_ , labels=lowerCamelCase_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase__ ( self :Tuple , lowerCamelCase_ :List[str] , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :List[str] , lowerCamelCase_ :str , *lowerCamelCase_ :Dict ):
"""simple docstring"""
lowerCamelCase__ : Tuple =OpenAIGPTDoubleHeadsModel(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : Optional[Any] =model(lowerCamelCase_ , token_type_ids=lowerCamelCase_ , labels=lowerCamelCase_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase__ ( self :List[Any] , lowerCamelCase_ :int , lowerCamelCase_ :Optional[Any] , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :str , *lowerCamelCase_ :List[Any] ):
"""simple docstring"""
lowerCamelCase__ : List[str] =self.num_labels
lowerCamelCase__ : Tuple =OpenAIGPTForSequenceClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : Optional[int] =ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase__ : List[str] =model(lowerCamelCase_ , token_type_ids=lowerCamelCase_ , labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCAmelCase__ ( self :List[str] ):
"""simple docstring"""
lowerCamelCase__ : str =self.prepare_config_and_inputs()
(
(
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) ,
) : Dict =config_and_inputs
lowerCamelCase__ : Tuple ={
'input_ids': input_ids,
'token_type_ids': token_type_ids,
'head_mask': head_mask,
}
return config, inputs_dict
@require_torch
class A_ ( A__ , A__ , A__ , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = (
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE_ = (
(OpenAIGPTLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
SCREAMING_SNAKE_CASE_ = (
{
"""feature-extraction""": OpenAIGPTModel,
"""text-classification""": OpenAIGPTForSequenceClassification,
"""text-generation""": OpenAIGPTLMHeadModel,
"""zero-shot""": OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
def UpperCAmelCase__ ( self :int , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :List[Any] , lowerCamelCase_ :List[Any] , lowerCamelCase_ :List[Any] , lowerCamelCase_ :List[str] ):
"""simple docstring"""
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
def UpperCAmelCase__ ( self :Dict , lowerCamelCase_ :Tuple , lowerCamelCase_ :Optional[Any] , lowerCamelCase_ :List[Any]=False ):
"""simple docstring"""
lowerCamelCase__ : str =super()._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ , return_labels=lowerCamelCase_ )
if return_labels:
if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
lowerCamelCase__ : Dict =torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=lowerCamelCase_ , )
lowerCamelCase__ : Union[str, Any] =inputs_dict['labels']
lowerCamelCase__ : Tuple =inputs_dict['labels']
lowerCamelCase__ : int =torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=lowerCamelCase_ , )
lowerCamelCase__ : Optional[Any] =torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCamelCase_ )
return inputs_dict
def UpperCAmelCase__ ( self :str ):
"""simple docstring"""
lowerCamelCase__ : List[str] =OpenAIGPTModelTester(self )
lowerCamelCase__ : Union[str, Any] =ConfigTester(self , config_class=lowerCamelCase_ , n_embd=37 )
def UpperCAmelCase__ ( self :int ):
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self :Union[str, Any] ):
"""simple docstring"""
lowerCamelCase__ : Union[str, Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_model(*lowerCamelCase_ )
def UpperCAmelCase__ ( self :Union[str, Any] ):
"""simple docstring"""
lowerCamelCase__ : List[Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*lowerCamelCase_ )
def UpperCAmelCase__ ( self :Union[str, Any] ):
"""simple docstring"""
lowerCamelCase__ : int =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_double_lm_head_model(*lowerCamelCase_ )
def UpperCAmelCase__ ( self :str ):
"""simple docstring"""
lowerCamelCase__ : str =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*lowerCamelCase_ )
@slow
def UpperCAmelCase__ ( self :List[str] ):
"""simple docstring"""
for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase__ : Optional[Any] =OpenAIGPTModel.from_pretrained(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
@require_torch
class A_ ( unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCAmelCase__ ( self :Optional[Any] ):
"""simple docstring"""
lowerCamelCase__ : Optional[Any] =OpenAIGPTLMHeadModel.from_pretrained('openai-gpt' )
model.to(lowerCamelCase_ )
lowerCamelCase__ : List[str] =torch.tensor([[481, 4_735, 544]] , dtype=torch.long , device=lowerCamelCase_ ) # the president is
lowerCamelCase__ : List[Any] =[
481,
4_735,
544,
246,
963,
870,
762,
239,
244,
40_477,
244,
249,
719,
881,
487,
544,
240,
244,
603,
481,
] # the president is a very good man. " \n " i\'m sure he is, " said the
lowerCamelCase__ : Tuple =model.generate(lowerCamelCase_ , do_sample=lowerCamelCase_ )
        self.assertListEqual(output_ids[0].tolist() , lowerCamelCase_ )
| 126 | 1 |
from __future__ import annotations
import math
def is_prime(number: int) -> bool:
    """Return True if `number` is prime, using the 6k ± 1 optimization."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def list_truncated_nums(n: int) -> list[int]:
    """Return `n` plus every left- and right-truncation of its digits."""
    str_num = str(n)
    list_nums = [n]
    for i in range(1, len(str_num)):
        list_nums.append(int(str_num[i:]))
        list_nums.append(int(str_num[:-i]))
    return list_nums
def validate(n: int) -> bool:
    """Cheap pre-filter: for numbers longer than 3 digits, the leading and
    trailing three digits must themselves be prime."""
    if len(str(n)) > 3:
        if not is_prime(int(str(n)[-3:])) or not is_prime(int(str(n)[:3])):
            return False
    return True
def compute_truncated_primes(count: int = 11) -> list[int]:
    """Collect the first `count` primes that stay prime under every truncation."""
    list_truncated_primes: list[int] = []
    num = 13
    while len(list_truncated_primes) != count:
        if validate(num):
            list_nums = list_truncated_nums(num)
            if all(is_prime(i) for i in list_nums):
                list_truncated_primes.append(num)
        num += 2
    return list_truncated_primes
def solution() -> int:
    """Project Euler problem 37: sum of the only eleven truncatable primes."""
    return sum(compute_truncated_primes(11))
if __name__ == "__main__":
print(F"""{sum(compute_truncated_primes(11)) = }""")
| 363 |
from argparse import ArgumentParser
from .env import EnvironmentCommand
def main():
    parser = ArgumentParser("Diffusers CLI tool", usage="diffusers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="diffusers-cli command helpers")

    # Register commands
    EnvironmentCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()
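# Usage sketch (my addition): this function is normally exposed as the
# `diffusers-cli` console script; a dry run can fake the argv instead, e.g.
#
#   import sys
#   sys.argv = ["diffusers-cli", "env"]  # equivalent to `diffusers-cli env`
#   main()                               # prints environment diagnostics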
if __name__ == "__main__":
main()
| 115 | 0 |
def pancake_sort(arr):
    """Sort `arr` by repeatedly flipping prefixes (pancake sort)."""
    cur = len(arr)
    while cur > 1:
        # Find the index of the maximum number in arr[0:cur]
        mi = arr.index(max(arr[0:cur]))
        # Reverse from 0 to mi
        arr = arr[mi::-1] + arr[mi + 1 : len(arr)]
        # Reverse whole list
        arr = arr[cur - 1 :: -1] + arr[cur : len(arr)]
        cur -= 1
    return arr


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(pancake_sort(unsorted))
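    # Quick self-checks (my examples, not in the original script):
    assert pancake_sort([3, 1, 2]) == [1, 2, 3]
    assert pancake_sort([]) == []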
| 52 |
"""simple docstring"""
def gnome_sort(lst: list):
    """Gnome sort: walk forward while ordered, swap and step back otherwise."""
    if len(lst) <= 1:
        return lst
    i = 1
    while i < len(lst):
        if lst[i - 1] <= lst[i]:
            i += 1
        else:
            lst[i - 1], lst[i] = lst[i], lst[i - 1]
            i -= 1
            if i == 0:
                i = 1
    return lst


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(gnome_sort(unsorted))
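    # Quick self-checks (my examples, not in the original script):
    assert gnome_sort([3, 1, 2]) == [1, 2, 3]
    assert gnome_sort([]) == []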
| 260 | 0 |
def solution() -> int:
    """
    Project Euler problem 9: return a * b * c for the unique Pythagorean
    triplet (a, b, c) with a + b + c = 1000.
    """
    return [
        a * b * (1000 - a - b)
        for a in range(1, 999)
        for b in range(a, 999)
        if (a * a + b * b == (1000 - a - b) ** 2)
    ][0]
if __name__ == "__main__":
print(f'{solution() = }')
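    # Known result for Project Euler #9 (my check): the triplet is
    # (200, 375, 425), so solution() == 200 * 375 * 425 == 31875000.
    assert solution() == 31875000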
| 78 |
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __snake_case ( lowerCamelCase__ , unittest.TestCase ):
__lowerCamelCase : List[Any] = KandinskyVaaControlnetPipeline
__lowerCamelCase : int = ["""image_embeds""", """negative_image_embeds""", """hint"""]
__lowerCamelCase : Optional[int] = ["""image_embeds""", """negative_image_embeds""", """hint"""]
__lowerCamelCase : Optional[Any] = [
"""generator""",
"""height""",
"""width""",
"""latents""",
"""guidance_scale""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
__lowerCamelCase : Dict = False
@property
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
return 32
@property
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
return 32
@property
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
return self.time_input_dim
@property
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
return self.time_input_dim * 4
@property
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
return 100
@property
def UpperCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
torch.manual_seed(0 )
UpperCAmelCase : Any ={
'''in_channels''': 8,
# Out channels is double in channels because predicts mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''image_hint''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
UpperCAmelCase : List[Any] =UNetaDConditionModel(**snake_case__ )
return model
@property
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
torch.manual_seed(0 )
UpperCAmelCase : Any =VQModel(**self.dummy_movq_kwargs )
return model
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase : List[str] =self.dummy_unet
UpperCAmelCase : Tuple =self.dummy_movq
UpperCAmelCase : Union[str, Any] =DDIMScheduler(
num_train_timesteps=1000 , beta_schedule='''linear''' , beta_start=0.0_0085 , beta_end=0.012 , clip_sample=snake_case__ , set_alpha_to_one=snake_case__ , steps_offset=1 , prediction_type='''epsilon''' , thresholding=snake_case__ , )
UpperCAmelCase : Tuple ={
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def UpperCAmelCase__ ( self , snake_case__ , snake_case__=0 ) -> Any:
'''simple docstring'''
UpperCAmelCase : str =floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(snake_case__ ) ).to(snake_case__ )
UpperCAmelCase : Tuple =floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
snake_case__ )
# create hint
UpperCAmelCase : Tuple =floats_tensor((1, 3, 64, 64) , rng=random.Random(snake_case__ ) ).to(snake_case__ )
if str(snake_case__ ).startswith('''mps''' ):
UpperCAmelCase : Optional[int] =torch.manual_seed(snake_case__ )
else:
UpperCAmelCase : int =torch.Generator(device=snake_case__ ).manual_seed(snake_case__ )
UpperCAmelCase : List[str] ={
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''hint''': hint,
'''generator''': generator,
'''height''': 64,
'''width''': 64,
'''guidance_scale''': 4.0,
'''num_inference_steps''': 2,
'''output_type''': '''np''',
}
return inputs
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase : List[Any] ='''cpu'''
UpperCAmelCase : List[Any] =self.get_dummy_components()
UpperCAmelCase : Tuple =self.pipeline_class(**snake_case__ )
UpperCAmelCase : Tuple =pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
UpperCAmelCase : Optional[int] =pipe(**self.get_dummy_inputs(snake_case__ ) )
UpperCAmelCase : str =output.images
UpperCAmelCase : List[str] =pipe(
**self.get_dummy_inputs(snake_case__ ) , return_dict=snake_case__ , )[0]
UpperCAmelCase : Union[str, Any] =image[0, -3:, -3:, -1]
UpperCAmelCase : List[str] =image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase : Union[str, Any] =np.array(
[0.695_9826, 0.86_8279, 0.755_8092, 0.6876_9467, 0.8580_5804, 0.6597_7496, 0.4488_5302, 0.595_9111, 0.425_1595] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class __snake_case ( unittest.TestCase ):
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
UpperCAmelCase : Union[str, Any] =load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy''' )
UpperCAmelCase : Tuple =load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinskyv22/hint_image_cat.png''' )
UpperCAmelCase : int =torch.from_numpy(np.array(snake_case__ ) ).float() / 255.0
UpperCAmelCase : List[str] =hint.permute(2 , 0 , 1 ).unsqueeze(0 )
UpperCAmelCase : Dict =KandinskyVaaPriorPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-prior''' , torch_dtype=torch.floataa )
pipe_prior.to(snake_case__ )
UpperCAmelCase : int =KandinskyVaaControlnetPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-controlnet-depth''' , torch_dtype=torch.floataa )
UpperCAmelCase : str =pipeline.to(snake_case__ )
pipeline.set_progress_bar_config(disable=snake_case__ )
UpperCAmelCase : int ='''A robot, 4k photo'''
UpperCAmelCase : int =torch.Generator(device='''cuda''' ).manual_seed(0 )
UpperCAmelCase , UpperCAmelCase : List[str] =pipe_prior(
snake_case__ , generator=snake_case__ , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple()
UpperCAmelCase : List[str] =torch.Generator(device='''cuda''' ).manual_seed(0 )
UpperCAmelCase : Dict =pipeline(
image_embeds=snake_case__ , negative_image_embeds=snake_case__ , hint=snake_case__ , generator=snake_case__ , num_inference_steps=100 , output_type='''np''' , )
UpperCAmelCase : List[Any] =output.images[0]
assert image.shape == (512, 512, 3)
assert_mean_pixel_difference(snake_case__ , snake_case__ )
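# Reproducibility note (my addition): the pixel-slice assertions above only hold
# because every source of randomness is pinned, e.g.
#
#   torch.manual_seed(0)                                       # dummy-weight init
#   generator = torch.Generator(device=device).manual_seed(0)  # sampling noise
#   enable_full_determinism()                                  # deterministic cuDNN/cuBLAS kernels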
| 78 | 1 |
"""simple docstring"""
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
GLUE_TASKS_NUM_LABELS = {
'cola': 2,
'mnli': 3,
'mrpc': 2,
'sst-2': 2,
'sts-b': 1,
'qqp': 2,
'qnli': 2,
'rte': 2,
'wnli': 2,
}
logging.set_verbosity_info()
def convert_xlnet_checkpoint_to_pytorch(tf_checkpoint_path, xlnet_config_file, pytorch_dump_folder_path, finetuning_task=None):
    # Initialise PyTorch model
    config = XLNetConfig.from_json_file(xlnet_config_file)

    finetuning_task = finetuning_task.lower() if finetuning_task is not None else ""
    if finetuning_task in GLUE_TASKS_NUM_LABELS:
        print(f"Building PyTorch XLNetForSequenceClassification model from configuration: {config}")
        config.finetuning_task = finetuning_task
        config.num_labels = GLUE_TASKS_NUM_LABELS[finetuning_task]
        model = XLNetForSequenceClassification(config)
    elif "squad" in finetuning_task:
        config.finetuning_task = finetuning_task
        model = XLNetForQuestionAnswering(config)
    else:
        model = XLNetLMHeadModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_xlnet(model, config, tf_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
    print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
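# Invocation sketch (my addition; the paths are placeholders, not real checkpoints):
#
#   python convert_xlnet_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./xlnet_model.ckpt \
#       --xlnet_config_file ./xlnet_config.json \
#       --pytorch_dump_folder_path ./xlnet-pytorch \
#       --finetuning_task sst-2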
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--xlnet_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained XLNet model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the folder to store the PyTorch model or dataset/vocab.',
)
parser.add_argument(
'--finetuning_task',
default=None,
type=str,
help='Name of a task on which the XLNet TensorFlow model was fine-tuned',
)
    args = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
| 44 |
from sklearn.metrics import mean_squared_error
import datasets
_CITATION = '\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n'
_DESCRIPTION = '\\nMean Squared Error(MSE) is the average of the square of difference between the predicted\nand actual values.\n'
_KWARGS_DESCRIPTION = '\nArgs:\n predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Estimated target values.\n references: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Ground truth (correct) target values.\n sample_weight: array-like of shape (n_samples,), default=None\n Sample weights.\n multioutput: {"raw_values", "uniform_average"} or array-like of shape (n_outputs,), default="uniform_average"\n Defines aggregating of multiple output values. Array-like value defines weights used to average errors.\n\n "raw_values" : Returns a full set of errors in case of multioutput input.\n\n "uniform_average" : Errors of all outputs are averaged with uniform weight.\n\n squared : bool, default=True\n If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.\n\nReturns:\n mse : mean squared error.\nExamples:\n\n >>> mse_metric = datasets.load_metric("mse")\n >>> predictions = [2.5, 0.0, 2, 8]\n >>> references = [3, -0.5, 2, 7]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'mse\': 0.375}\n >>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)\n >>> print(rmse_result)\n {\'mse\': 0.6123724356957945}\n\n If you\'re using multi-dimensional lists, then set the config as follows :\n\n >>> mse_metric = datasets.load_metric("mse", "multilist")\n >>> predictions = [[0.5, 1], [-1, 1], [7, -6]]\n >>> references = [[0, 2], [-1, 2], [8, -5]]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'mse\': 0.7083333333333334}\n >>> results = mse_metric.compute(predictions=predictions, references=references, multioutput=\'raw_values\')\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {\'mse\': array([0.41666667, 1. ])}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Mse(datasets.Metric):
    def _info(self):
        """Metric metadata; the feature schema depends on the config name."""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , reference_urls=[
'https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html'
] , )
    def _get_feature_types(self):
        """Return the feature types for the chosen config ("multilist" vs default)."""
if self.config_name == "multilist":
return {
"predictions": datasets.Sequence(datasets.Value('float' ) ),
"references": datasets.Sequence(datasets.Value('float' ) ),
}
else:
return {
"predictions": datasets.Value('float' ),
"references": datasets.Value('float' ),
}
    def _compute(self, predictions, references, sample_weight=None, multioutput="uniform_average", squared=True):
        mse = mean_squared_error(
            references, predictions, sample_weight=sample_weight, multioutput=multioutput, squared=squared
        )
        return {"mse": mse}
| 199 | 0 |
'''simple docstring'''
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class __UpperCamelCase :
def __init__( self , __a , __a=99 , __a=13 , __a=16 , __a=7 , __a=True , __a=True , __a=True , __a=False , __a=True , __a=2 , __a=32 , __a=4 , __a=4 , __a=30 , __a=0 , __a=1 , __a=2 , __a=None , ):
'''simple docstring'''
__a : Optional[Any] = parent
__a : Optional[int] = batch_size
__a : List[Any] = decoder_seq_length
# For common tests
__a : Tuple = self.decoder_seq_length
__a : Any = is_training
__a : Dict = use_attention_mask
__a : Any = use_labels
__a : Tuple = vocab_size
__a : List[str] = d_model
__a : Optional[int] = d_model
__a : int = decoder_layers
__a : int = decoder_layers
__a : Optional[Any] = decoder_ffn_dim
__a : Union[str, Any] = decoder_attention_heads
__a : Dict = decoder_attention_heads
__a : Optional[Any] = eos_token_id
__a : Optional[Any] = bos_token_id
__a : str = pad_token_id
__a : Tuple = decoder_start_token_id
__a : Tuple = use_cache
__a : Optional[Any] = max_position_embeddings
__a : List[str] = None
__a : int = decoder_seq_length
__a : Dict = 2
__a : Dict = 1
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : List[Any] = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
__a : Any = None
if self.use_attention_mask:
__a : Union[str, Any] = ids_tensor([self.batch_size, self.decoder_seq_length] , vocab_size=2 )
__a : int = None
if self.use_labels:
__a : Union[str, Any] = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
__a : Any = TrOCRConfig(
vocab_size=self.vocab_size , d_model=self.d_model , decoder_layers=self.decoder_layers , decoder_ffn_dim=self.decoder_ffn_dim , decoder_attention_heads=self.decoder_attention_heads , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , use_cache=self.use_cache , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , max_position_embeddings=self.max_position_embeddings , )
return (config, input_ids, attention_mask, lm_labels)
def __UpperCAmelCase ( self , __a , __a , __a , __a , ):
'''simple docstring'''
__a : Optional[Any] = True
__a : str = TrOCRDecoder(config=__a ).to(__a ).eval()
__a : List[Any] = input_ids[:2]
input_ids[input_ids == 0] += 1
# first forward pass
__a : Any = model(__a , use_cache=__a )
__a : List[Any] = model(__a )
__a : Optional[Any] = model(__a , use_cache=__a )
self.parent.assertTrue(len(__a ) == len(__a ) )
self.parent.assertTrue(len(__a ) == len(__a ) + 1 )
__a : Tuple = outputs['past_key_values']
# create hypothetical next token and extent to next_input_ids
__a : Any = ids_tensor((2, 1) , config.vocab_size - 1 ) + 1
# append to next input_ids and
__a : Union[str, Any] = torch.cat([input_ids, next_tokens] , dim=-1 )
__a : Tuple = model(__a )['last_hidden_state']
__a : Dict = model(__a , past_key_values=__a )['last_hidden_state']
# select random slice
__a : Optional[int] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
__a : Union[str, Any] = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
__a : str = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
assert torch.allclose(__a , __a , atol=1E-3 )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : str = self.prepare_config_and_inputs()
__a , __a , __a , __a : int = config_and_inputs
__a : Dict = {'input_ids': input_ids, 'attention_mask': attention_mask}
return config, inputs_dict
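# What `create_and_check_decoder_model_past` above verifies, in miniature (my
# paraphrase, not extra test code): decoding one new token against the cached
# `past_key_values` must match the corresponding slice of a full forward pass:
#
#   full = model(torch.cat([input_ids, next_tokens], dim=-1))["last_hidden_state"]
#   cached = model(next_tokens, past_key_values=past)["last_hidden_state"]
#   assert torch.allclose(full[:, -1], cached[:, -1], atol=1e-3)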
@require_torch
class __UpperCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
A_ = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
A_ = (TrOCRForCausalLM,) if is_torch_available() else ()
A_ = {"text-generation": TrOCRForCausalLM} if is_torch_available() else {}
A_ = True
A_ = False
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : List[Any] = TrOCRStandaloneDecoderModelTester(self , is_training=__a )
__a : str = ConfigTester(self , config_class=__a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
pass
def __UpperCAmelCase ( self ):
'''simple docstring'''
pass
def __UpperCAmelCase ( self ):
'''simple docstring'''
pass
def __UpperCAmelCase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past(*__a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
return
@unittest.skip('The model doesn\'t support left padding' ) # and it's not used enough to be worth fixing :)
def __UpperCAmelCase ( self ):
'''simple docstring'''
pass
| 294 |
'''simple docstring'''
from .imports import is_rich_available
if is_rich_available():
from rich.traceback import install
install(show_locals=False)
else:
raise ModuleNotFoundError('To use the rich extension, install rich with `pip install rich`')
| 294 | 1 |
def find_min(arr):
    """Split `arr` into two subsets minimising the difference of their sums."""
    n = len(arr)
    s = sum(arr)
    # dp[i][j] is True when some subset of the first i elements sums to j.
    dp = [[False for x in range(s + 1)] for y in range(n + 1)]
    for i in range(n + 1):
        dp[i][0] = True  # the empty subset always sums to 0
    for i in range(1, s + 1):
        dp[0][i] = False
    for i in range(1, n + 1):
        for j in range(1, s + 1):
            dp[i][j] = dp[i - 1][j]  # skip element i-1
            if arr[i - 1] <= j:
                dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]]  # or take it
    diff = s
    for j in range(s // 2, -1, -1):
        if dp[n][j] is True:
            diff = s - 2 * j
            break
    return diff
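if __name__ == "__main__":
    # Usage sketch (my example): [1, 6, 11, 5] splits into {1, 5, 6} and {11},
    # so the minimum achievable difference is 1.
    print(find_min([1, 6, 11, 5]))  # 1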
| 345 |
"""simple docstring"""
import re
import string
from collections import Counter
import sacrebleu
import sacremoses
from packaging import version
import datasets
_CITATION = '''
@inproceedings{xu-etal-2016-optimizing,
title = {Optimizing Statistical Machine Translation for Text Simplification},
authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},
journal = {Transactions of the Association for Computational Linguistics},
volume = {4},
year={2016},
url = {https://www.aclweb.org/anthology/Q16-1029},
pages = {401--415
},
@inproceedings{post-2018-call,
title = "A Call for Clarity in Reporting {BLEU} Scores",
author = "Post, Matt",
booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
month = oct,
year = "2018",
address = "Belgium, Brussels",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W18-6319",
pages = "186--191",
}
'''
_DESCRIPTION = '''\
WIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU
It can be used to evaluate the quality of machine-generated texts.
'''
_KWARGS_DESCRIPTION = '''
Calculates sari score (between 0 and 100) given a list of source and predicted
sentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.
Args:
sources: list of source sentences where each sentence should be a string.
predictions: list of predicted sentences where each sentence should be a string.
references: list of lists of reference sentences where each sentence should be a string.
Returns:
sari: sari score
sacrebleu: sacrebleu score
exact: exact score
Examples:
>>> sources=["About 95 species are currently accepted ."]
>>> predictions=["About 95 you now get in ."]
>>> references=[["About 95 species are currently known ."]]
>>> wiki_split = datasets.load_metric("wiki_split")
>>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)
>>> print(results)
{\'sari\': 21.805555555555557, \'sacrebleu\': 14.535768424205482, \'exact\': 0.0}
'''
def normalize_answer(text):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        regex = re.compile(r"\b(a|an|the)\b", re.UNICODE)
        return re.sub(regex, " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(text))))
def compute_exact(a_gold, a_pred):
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))
def compute_em(predictions, references):
    scores = [any(compute_exact(ref, pred) for ref in refs) for pred, refs in zip(predictions, references)]
    return (sum(scores) / len(scores)) * 100
def SARIngram(sgrams, cgrams, rgramslist, numref):
    rgramsall = [rgram for rgrams in rgramslist for rgram in rgrams]
    rgramcounter = Counter(rgramsall)

    sgramcounter = Counter(sgrams)
    sgramcounter_rep = Counter()
    for sgram, scount in sgramcounter.items():
        sgramcounter_rep[sgram] = scount * numref

    cgramcounter = Counter(cgrams)
    cgramcounter_rep = Counter()
    for cgram, ccount in cgramcounter.items():
        cgramcounter_rep[cgram] = ccount * numref

    # KEEP
    keepgramcounter_rep = sgramcounter_rep & cgramcounter_rep
    keepgramcountergood_rep = keepgramcounter_rep & rgramcounter
    keepgramcounterall_rep = sgramcounter_rep & rgramcounter

    keeptmpscore1 = 0
    keeptmpscore2 = 0
    for keepgram in keepgramcountergood_rep:
        keeptmpscore1 += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
        # Fix an alleged bug [2] in the keep score computation.
        # keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
        keeptmpscore2 += keepgramcountergood_rep[keepgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    keepscore_precision = 1
    keepscore_recall = 1
    if len(keepgramcounter_rep) > 0:
        keepscore_precision = keeptmpscore1 / len(keepgramcounter_rep)
    if len(keepgramcounterall_rep) > 0:
        # Fix an alleged bug [2] in the keep score computation.
        # keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
        keepscore_recall = keeptmpscore2 / sum(keepgramcounterall_rep.values())
    keepscore = 0
    if keepscore_precision > 0 or keepscore_recall > 0:
        keepscore = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)

    # DELETION
    delgramcounter_rep = sgramcounter_rep - cgramcounter_rep
    delgramcountergood_rep = delgramcounter_rep - rgramcounter
    delgramcounterall_rep = sgramcounter_rep - rgramcounter
    deltmpscore1 = 0
    deltmpscore2 = 0
    for delgram in delgramcountergood_rep:
        deltmpscore1 += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
        deltmpscore2 += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    delscore_precision = 1
    if len(delgramcounter_rep) > 0:
        delscore_precision = deltmpscore1 / len(delgramcounter_rep)

    # ADDITION
    addgramcounter = set(cgramcounter) - set(sgramcounter)
    addgramcountergood = set(addgramcounter) & set(rgramcounter)
    addgramcounterall = set(rgramcounter) - set(sgramcounter)

    addtmpscore = 0
    for addgram in addgramcountergood:
        addtmpscore += 1

    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    addscore_precision = 1
    addscore_recall = 1
    if len(addgramcounter) > 0:
        addscore_precision = addtmpscore / len(addgramcounter)
    if len(addgramcounterall) > 0:
        addscore_recall = addtmpscore / len(addgramcounterall)
    addscore = 0
    if addscore_precision > 0 or addscore_recall > 0:
        addscore = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)

    return (keepscore, delscore_precision, addscore)
def SARIsent(ssent, csent, rsents):
    numref = len(rsents)

    s1grams = ssent.split(" ")
    c1grams = csent.split(" ")
    s2grams = []
    c2grams = []
    s3grams = []
    c3grams = []
    s4grams = []
    c4grams = []
    r1gramslist = []
    r2gramslist = []
    r3gramslist = []
    r4gramslist = []
    for rsent in rsents:
        r1grams = rsent.split(" ")
        r2grams = []
        r3grams = []
        r4grams = []
        r1gramslist.append(r1grams)
        for i in range(0, len(r1grams) - 1):
            if i < len(r1grams) - 1:
                r2gram = r1grams[i] + " " + r1grams[i + 1]
                r2grams.append(r2gram)
            if i < len(r1grams) - 2:
                r3gram = r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2]
                r3grams.append(r3gram)
            if i < len(r1grams) - 3:
                r4gram = r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2] + " " + r1grams[i + 3]
                r4grams.append(r4gram)
        r2gramslist.append(r2grams)
        r3gramslist.append(r3grams)
        r4gramslist.append(r4grams)

    for i in range(0, len(s1grams) - 1):
        if i < len(s1grams) - 1:
            s2gram = s1grams[i] + " " + s1grams[i + 1]
            s2grams.append(s2gram)
        if i < len(s1grams) - 2:
            s3gram = s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2]
            s3grams.append(s3gram)
        if i < len(s1grams) - 3:
            s4gram = s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2] + " " + s1grams[i + 3]
            s4grams.append(s4gram)

    for i in range(0, len(c1grams) - 1):
        if i < len(c1grams) - 1:
            c2gram = c1grams[i] + " " + c1grams[i + 1]
            c2grams.append(c2gram)
        if i < len(c1grams) - 2:
            c3gram = c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2]
            c3grams.append(c3gram)
        if i < len(c1grams) - 3:
            c4gram = c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2] + " " + c1grams[i + 3]
            c4grams.append(c4gram)

    (keep1score, del1score, add1score) = SARIngram(s1grams, c1grams, r1gramslist, numref)
    (keep2score, del2score, add2score) = SARIngram(s2grams, c2grams, r2gramslist, numref)
    (keep3score, del3score, add3score) = SARIngram(s3grams, c3grams, r3gramslist, numref)
    (keep4score, del4score, add4score) = SARIngram(s4grams, c4grams, r4gramslist, numref)
    avgkeepscore = sum([keep1score, keep2score, keep3score, keep4score]) / 4
    avgdelscore = sum([del1score, del2score, del3score, del4score]) / 4
    avgaddscore = sum([add1score, add2score, add3score, add4score]) / 4
    finalscore = (avgkeepscore + avgdelscore + avgaddscore) / 3
    return finalscore
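# Quick sanity sketch (my example sentences, not from the metric's tests):
#
#   SARIsent("about 95 species are currently accepted .",
#            "about 95 species are now accepted .",
#            ["about 95 species are currently known ."])
#
# returns a 0-1 score averaging the KEEP/DELETE/ADD F-scores over 1- to 4-grams.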
def normalize(sentence: str, lowercase: bool = True, tokenizer: str = "13a", return_str: bool = True):
    """Normalize/tokenize a sentence with the chosen sacrebleu or sacremoses tokenizer."""
    if lowercase:
        sentence = sentence.lower()

    if tokenizer in ["13a", "intl"]:
        if version.parse(sacrebleu.__version__).major >= 2:
            normalized_sent = sacrebleu.metrics.bleu._get_tokenizer(tokenizer)()(sentence)
        else:
            normalized_sent = sacrebleu.TOKENIZERS[tokenizer]()(sentence)
    elif tokenizer == "moses":
        normalized_sent = sacremoses.MosesTokenizer().tokenize(sentence, return_str=True, escape=False)
    elif tokenizer == "penn":
        normalized_sent = sacremoses.MosesTokenizer().penn_tokenize(sentence, return_str=True)
    else:
        normalized_sent = sentence

    if not return_str:
        normalized_sent = normalized_sent.split()

    return normalized_sent
def compute_sari(sources, predictions, references):
    if not (len(sources) == len(predictions) == len(references)):
        raise ValueError("Sources length must match predictions and references lengths.")
    sari_score = 0
    for src, pred, refs in zip(sources, predictions, references):
        sari_score += SARIsent(normalize(src), normalize(pred), [normalize(sent) for sent in refs])
    sari_score = sari_score / len(predictions)
    return 100 * sari_score
def compute_sacrebleu(
    predictions,
    references,
    smooth_method="exp",
    smooth_value=None,
    force=False,
    lowercase=False,
    use_effective_order=False,
):
    references_per_prediction = len(references[0])
    if any(len(refs) != references_per_prediction for refs in references):
        raise ValueError("Sacrebleu requires the same number of references for each prediction")
    transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
    output = sacrebleu.corpus_bleu(
        predictions,
        transformed_references,
        smooth_method=smooth_method,
        smooth_value=smooth_value,
        force=force,
        lowercase=lowercase,
        use_effective_order=use_effective_order,
    )
    return output.score
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class WikiSplit(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                }
            ),
            codebase_urls=[
                "https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py",
                "https://github.com/cocoxu/simplification/blob/master/SARI.py",
                "https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py",
                "https://github.com/mjpost/sacreBLEU",
            ],
            reference_urls=[
                "https://www.aclweb.org/anthology/Q16-1029.pdf",
                "https://github.com/mjpost/sacreBLEU",
                "https://en.wikipedia.org/wiki/BLEU",
                "https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
            ],
        )

    def _compute(self, sources, predictions, references):
        result = {}
        result.update({"sari": compute_sari(sources=sources, predictions=predictions, references=references)})
        result.update({"sacrebleu": compute_sacrebleu(predictions=predictions, references=references)})
        result.update({"exact": compute_em(predictions=predictions, references=references)})
        return result
| 320 | 0 |
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class lowerCAmelCase ( lowercase_ ):
__lowerCamelCase = (IPNDMScheduler,)
__lowerCamelCase = (('num_inference_steps', 50),)
def UpperCAmelCase ( self :Optional[int] , **_lowercase :Dict ):
'''simple docstring'''
lowercase__ = {"num_train_timesteps": 10_00}
config.update(**_lowercase )
return config
    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]

            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        pass

    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]

            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample

    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]

            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample
            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample
            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_timesteps(self):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps, time_step=None)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=None)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 2540529) < 10
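    # For reference, the save/load round-trip exercised by check_over_configs, in
    # isolation (a minimal sketch; `SchedulerClass` and `config` are placeholders):
    #
    #   scheduler = SchedulerClass(**config)
    #   with tempfile.TemporaryDirectory() as tmpdirname:
    #       scheduler.save_config(tmpdirname)
    #       restored = SchedulerClass.from_pretrained(tmpdirname)
    #   # stepping `scheduler` and `restored` with identical inputs must agree to 1e-5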
| 364 |
def move_tower(height, from_pole, to_pole, with_pole):
    """Recursively move `height` disks from `from_pole` to `to_pole`, using `with_pole` as the spare."""
    if height >= 1:
        move_tower(height - 1, from_pole, with_pole, to_pole)
        move_disk(from_pole, to_pole)
        move_tower(height - 1, with_pole, to_pole, from_pole)


def move_disk(fp, tp):
    print("moving disk from", fp, "to", tp)


def main():
    height = int(input("Height of hanoi: ").strip())
    move_tower(height, "A", "B", "C")


if __name__ == "__main__":
    main()
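# Example run: move_tower(2, "A", "B", "C") prints
#   moving disk from A to C
#   moving disk from A to B
#   moving disk from C to B
# A tower of height n always takes 2**n - 1 moves.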
| 201 | 0 |
from typing import List, Optional, Union

import numpy as np
import PIL.Image

from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
    ChannelDimension,
    PILImageResampling,
    get_image_size,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)


class GLPNImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size_divisor: int = 32, resample=PILImageResampling.BILINEAR, do_rescale: bool = True, **kwargs):
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.size_divisor = size_divisor
        self.resample = resample
        super().__init__(**kwargs)

    def resize(self, image: np.ndarray, size_divisor: int, resample, data_format: Optional[ChannelDimension] = None, **kwargs):
        height, width = get_image_size(image)
        # Rounds the height and width down to the closest multiple of size_divisor
        new_h = height // size_divisor * size_divisor
        new_w = width // size_divisor * size_divisor
        image = resize(image, (new_h, new_w), resample=resample, data_format=data_format, **kwargs)
        return image

    def rescale(self, image: np.ndarray, scale: float, data_format: Optional[ChannelDimension] = None, **kwargs):
        return rescale(image=image, scale=scale, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: Union["PIL.Image.Image", TensorType, List["PIL.Image.Image"], List[TensorType]],
        do_resize: Optional[bool] = None,
        size_divisor: Optional[int] = None,
        resample=None,
        do_rescale: Optional[bool] = None,
        return_tensors: Optional[Union[TensorType, str]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        size_divisor = size_divisor if size_divisor is not None else self.size_divisor
        resample = resample if resample is not None else self.resample

        if do_resize and size_divisor is None:
            raise ValueError('size_divisor is required for resizing')

        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError('Invalid image(s)')

        # All transformations expect numpy arrays.
        images = [to_numpy_array(img) for img in images]

        if do_resize:
            images = [self.resize(image, size_divisor=size_divisor, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image, scale=1 / 255) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {'pixel_values': images}
        return BatchFeature(data=data, tensor_type=return_tensors)
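# Minimal usage sketch (the image path is a placeholder, not part of this module):
#
#   from PIL import Image
#   processor = GLPNImageProcessor(size_divisor=32)
#   inputs = processor(images=Image.open("example.jpg"), return_tensors="pt")
#   # inputs["pixel_values"] has height/width floored to multiples of 32 and
#   # pixel intensities rescaled from [0, 255] to [0, 1]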
| 176 |
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
__snake_case = NewType("""DataClass""", Any)
__snake_case = NewType("""DataClassType""", Any)
def string_to_bool(v):
    '''simple docstring'''
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise ArgumentTypeError(
            F'Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive).' )


def make_choice_type_function(choices: list) -> Callable[[str], Any]:
    '''simple docstring'''
    str_to_choice = {str(choice): choice for choice in choices}
    return lambda arg: str_to_choice.get(arg, arg)
def HfArg(
    *, aliases=None, help=None, default=dataclasses.MISSING, default_factory=dataclasses.MISSING, metadata=None, **kwargs
) -> dataclasses.Field:
    '''simple docstring'''
    if metadata is None:
        # Important, don't use as default param in function signature because dict is mutable and shared across function calls
        metadata = {}
    if aliases is not None:
        metadata["aliases"] = aliases
    if help is not None:
        metadata["help"] = help
    return dataclasses.field(metadata=metadata, default=default, default_factory=default_factory, **kwargs)
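# Hedged usage sketch for the field helper above (the dataclass and its fields
# are illustrative assumptions):
#
#   @dataclasses.dataclass
#   class TrainingArgs:
#       learning_rate: float = HfArg(default=5e-5, aliases=["--lr"], help="Peak learning rate.")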
class lowercase__ ( _UpperCAmelCase ):
A__ : Iterable[DataClassType]
def __init__( self : Union[str, Any] , UpperCAmelCase_ : Union[DataClassType, Iterable[DataClassType]] , **UpperCAmelCase_ : Optional[Any] ):
# To make the default appear when using --help
if "formatter_class" not in kwargs:
SCREAMING_SNAKE_CASE__ = ArgumentDefaultsHelpFormatter
super().__init__(**UpperCAmelCase_ )
if dataclasses.is_dataclass(UpperCAmelCase_ ):
SCREAMING_SNAKE_CASE__ = [dataclass_types]
SCREAMING_SNAKE_CASE__ = list(UpperCAmelCase_ )
for dtype in self.dataclass_types:
self._add_dataclass_arguments(UpperCAmelCase_ )
@staticmethod
def A_ ( UpperCAmelCase_ : ArgumentParser , UpperCAmelCase_ : dataclasses.Field ):
SCREAMING_SNAKE_CASE__ = F'--{field.name}'
SCREAMING_SNAKE_CASE__ = field.metadata.copy()
# field.metadata is not used at all by Data Classes,
# it is provided as a third-party extension mechanism.
if isinstance(field.type , UpperCAmelCase_ ):
raise RuntimeError(
'Unresolved type detected, which should have been done with the help of '
'`typing.get_type_hints` method by default' )
SCREAMING_SNAKE_CASE__ = kwargs.pop('aliases' , [] )
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
SCREAMING_SNAKE_CASE__ = [aliases]
SCREAMING_SNAKE_CASE__ = getattr(field.type , '__origin__' , field.type )
if origin_type is Union or (hasattr(UpperCAmelCase_ , 'UnionType' ) and isinstance(UpperCAmelCase_ , types.UnionType )):
if str not in field.type.__args__ and (
len(field.type.__args__ ) != 2 or type(UpperCAmelCase_ ) not in field.type.__args__
):
raise ValueError(
'Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because'
' the argument parser only supports one type per argument.'
F' Problem encountered in field \'{field.name}\'.' )
if type(UpperCAmelCase_ ) not in field.type.__args__:
# filter `str` in Union
SCREAMING_SNAKE_CASE__ = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
SCREAMING_SNAKE_CASE__ = getattr(field.type , '__origin__' , field.type )
elif bool not in field.type.__args__:
# filter `NoneType` in Union (except for `Union[bool, NoneType]`)
SCREAMING_SNAKE_CASE__ = (
field.type.__args__[0] if isinstance(UpperCAmelCase_ , field.type.__args__[1] ) else field.type.__args__[1]
)
SCREAMING_SNAKE_CASE__ = getattr(field.type , '__origin__' , field.type )
# A variable to store kwargs for a boolean field, if needed
# so that we can init a `no_*` complement argument (see below)
SCREAMING_SNAKE_CASE__ = {}
if origin_type is Literal or (isinstance(field.type , UpperCAmelCase_ ) and issubclass(field.type , UpperCAmelCase_ )):
if origin_type is Literal:
SCREAMING_SNAKE_CASE__ = field.type.__args__
else:
SCREAMING_SNAKE_CASE__ = [x.value for x in field.type]
SCREAMING_SNAKE_CASE__ = make_choice_type_function(kwargs['choices'] )
if field.default is not dataclasses.MISSING:
SCREAMING_SNAKE_CASE__ = field.default
else:
SCREAMING_SNAKE_CASE__ = True
elif field.type is bool or field.type == Optional[bool]:
# Copy the current kwargs to use to instantiate a `no_*` complement argument below.
# We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
SCREAMING_SNAKE_CASE__ = copy(UpperCAmelCase_ )
# Hack because type=bool in argparse does not behave as we want.
SCREAMING_SNAKE_CASE__ = string_to_bool
if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
# Default value is False if we have no default when of type bool.
SCREAMING_SNAKE_CASE__ = False if field.default is dataclasses.MISSING else field.default
# This is the value that will get picked if we don't include --field_name in any way
SCREAMING_SNAKE_CASE__ = default
# This tells argparse we accept 0 or 1 value after --field_name
SCREAMING_SNAKE_CASE__ = '?'
# This is the value that will get picked if we do --field_name (without value)
SCREAMING_SNAKE_CASE__ = True
elif isclass(UpperCAmelCase_ ) and issubclass(UpperCAmelCase_ , UpperCAmelCase_ ):
SCREAMING_SNAKE_CASE__ = field.type.__args__[0]
SCREAMING_SNAKE_CASE__ = '+'
if field.default_factory is not dataclasses.MISSING:
SCREAMING_SNAKE_CASE__ = field.default_factory()
elif field.default is dataclasses.MISSING:
SCREAMING_SNAKE_CASE__ = True
else:
SCREAMING_SNAKE_CASE__ = field.type
if field.default is not dataclasses.MISSING:
SCREAMING_SNAKE_CASE__ = field.default
elif field.default_factory is not dataclasses.MISSING:
SCREAMING_SNAKE_CASE__ = field.default_factory()
else:
SCREAMING_SNAKE_CASE__ = True
parser.add_argument(UpperCAmelCase_ , *UpperCAmelCase_ , **UpperCAmelCase_ )
# Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
# Order is important for arguments with the same destination!
# We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
# here and we do not need those changes/additional keys.
if field.default is True and (field.type is bool or field.type == Optional[bool]):
SCREAMING_SNAKE_CASE__ = False
parser.add_argument(F'--no_{field.name}' , action='store_false' , dest=field.name , **UpperCAmelCase_ )
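# Net effect for a boolean field `foo: bool = True` (illustrative):
#   (no flag)    -> foo=True   (the default picked above)
#   --foo        -> foo=True   (nargs="?" with const=True)
#   --foo false  -> foo=False  (string_to_bool parses the value)
#   --no_foo     -> foo=False  (the complement flag added just above)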
def A_ ( self : List[Any] , UpperCAmelCase_ : DataClassType ):
if hasattr(UpperCAmelCase_ , '_argument_group_name' ):
SCREAMING_SNAKE_CASE__ = self.add_argument_group(dtype._argument_group_name )
else:
SCREAMING_SNAKE_CASE__ = self
try:
SCREAMING_SNAKE_CASE__ = get_type_hints(UpperCAmelCase_ )
except NameError:
raise RuntimeError(
F'Type resolution failed for {dtype}. Try declaring the class in global scope or '
'removing line of `from __future__ import annotations` which opts in Postponed '
'Evaluation of Annotations (PEP 563)' )
except TypeError as ex:
# Remove this block when we drop Python 3.9 support
if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(UpperCAmelCase_ ):
SCREAMING_SNAKE_CASE__ = '.'.join(map(UpperCAmelCase_ , sys.version_info[:3] ) )
raise RuntimeError(
F'Type resolution failed for {dtype} on Python {python_version}. Try removing '
'line of `from __future__ import annotations` which opts in union types as '
'`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To '
'support Python versions lower than 3.10, you need to use '
'`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of '
'`X | None`.' ) from ex
raise
for field in dataclasses.fields(UpperCAmelCase_ ):
if not field.init:
continue
SCREAMING_SNAKE_CASE__ = type_hints[field.name]
self._parse_dataclass_field(UpperCAmelCase_ , UpperCAmelCase_ )
def A_ ( self : Dict , UpperCAmelCase_ : Any=None , UpperCAmelCase_ : List[Any]=False , UpperCAmelCase_ : Dict=True , UpperCAmelCase_ : str=None , UpperCAmelCase_ : str=None , ):
if args_file_flag or args_filename or (look_for_args_file and len(sys.argv )):
SCREAMING_SNAKE_CASE__ = []
if args_filename:
args_files.append(Path(UpperCAmelCase_ ) )
elif look_for_args_file and len(sys.argv ):
args_files.append(Path(sys.argv[0] ).with_suffix('.args' ) )
# args files specified via command line flag should overwrite default args files so we add them last
if args_file_flag:
# Create special parser just to extract the args_file_flag values
SCREAMING_SNAKE_CASE__ = ArgumentParser()
args_file_parser.add_argument(UpperCAmelCase_ , type=UpperCAmelCase_ , action='append' )
# Use only remaining args for further parsing (remove the args_file_flag)
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = args_file_parser.parse_known_args(args=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = vars(UpperCAmelCase_ ).get(args_file_flag.lstrip('-' ) , UpperCAmelCase_ )
if cmd_args_file_paths:
args_files.extend([Path(UpperCAmelCase_ ) for p in cmd_args_file_paths] )
SCREAMING_SNAKE_CASE__ = []
for args_file in args_files:
if args_file.exists():
file_args += args_file.read_text().split()
# in case of duplicate arguments the last one has precedence
# args specified via the command line should overwrite args from files, so we add them last
SCREAMING_SNAKE_CASE__ = file_args + args if args is not None else file_args + sys.argv[1:]
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.parse_known_args(args=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = []
for dtype in self.dataclass_types:
SCREAMING_SNAKE_CASE__ = {f.name for f in dataclasses.fields(UpperCAmelCase_ ) if f.init}
SCREAMING_SNAKE_CASE__ = {k: v for k, v in vars(UpperCAmelCase_ ).items() if k in keys}
for k in keys:
delattr(UpperCAmelCase_ , UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = dtype(**UpperCAmelCase_ )
outputs.append(UpperCAmelCase_ )
if len(namespace.__dict__ ) > 0:
# additional namespace.
outputs.append(UpperCAmelCase_ )
if return_remaining_strings:
return (*outputs, remaining_args)
else:
if remaining_args:
raise ValueError(F'Some specified arguments are not used by the HfArgumentParser: {remaining_args}' )
return (*outputs,)
def A_ ( self : str , UpperCAmelCase_ : Dict[str, Any] , UpperCAmelCase_ : bool = False ):
SCREAMING_SNAKE_CASE__ = set(args.keys() )
SCREAMING_SNAKE_CASE__ = []
for dtype in self.dataclass_types:
SCREAMING_SNAKE_CASE__ = {f.name for f in dataclasses.fields(UpperCAmelCase_ ) if f.init}
SCREAMING_SNAKE_CASE__ = {k: v for k, v in args.items() if k in keys}
unused_keys.difference_update(inputs.keys() )
SCREAMING_SNAKE_CASE__ = dtype(**UpperCAmelCase_ )
outputs.append(UpperCAmelCase_ )
if not allow_extra_keys and unused_keys:
raise ValueError(F'Some keys are not used by the HfArgumentParser: {sorted(UpperCAmelCase_ )}' )
return tuple(UpperCAmelCase_ )
def A_ ( self : Tuple , UpperCAmelCase_ : str , UpperCAmelCase_ : bool = False ):
with open(Path(UpperCAmelCase_ ) , encoding='utf-8' ) as open_json_file:
SCREAMING_SNAKE_CASE__ = json.loads(open_json_file.read() )
SCREAMING_SNAKE_CASE__ = self.parse_dict(UpperCAmelCase_ , allow_extra_keys=UpperCAmelCase_ )
return tuple(UpperCAmelCase_ )
def A_ ( self : Tuple , UpperCAmelCase_ : str , UpperCAmelCase_ : bool = False ):
SCREAMING_SNAKE_CASE__ = self.parse_dict(yaml.safe_load(Path(UpperCAmelCase_ ).read_text() ) , allow_extra_keys=UpperCAmelCase_ )
return tuple(UpperCAmelCase_ )
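# End-to-end sketch (the dataclass and CLI values are assumptions for illustration;
# parse_args_into_dataclasses is the upstream name of the parsing method mangled above):
#
#   @dataclasses.dataclass
#   class Args:
#       model_name: str = "bert-base-uncased"
#       num_epochs: int = 3
#
#   parser = HfArgumentParser(Args)
#   (args,) = parser.parse_args_into_dataclasses(["--num_epochs", "5"])
#   # args.num_epochs == 5; unknown flags raise unless return_remaining_strings=True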
| 176 | 1 |
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
GPT2TokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class _lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
response_mock = mock.Mock()
response_mock.status_code = 500
response_mock.headers = {}
response_mock.raise_for_status.side_effect = HTTPError
response_mock.json.return_value = {}
# Download this model to make sure it's in the cache.
_ = BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert' )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('requests.Session.request' , return_value=response_mock ) as mock_head:
    _ = BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert' )
    # This check we did call the fake head request
    mock_head.assert_called()
@require_tokenizers
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
response_mock = mock.Mock()
response_mock.status_code = 500
response_mock.headers = {}
response_mock.raise_for_status.side_effect = HTTPError
response_mock.json.return_value = {}
# Download this model to make sure it's in the cache.
_ = GPT2TokenizerFast.from_pretrained('gpt2' )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('requests.Session.request' , return_value=response_mock ) as mock_head:
    _ = GPT2TokenizerFast.from_pretrained('gpt2' )
    # This check we did call the fake head request
    mock_head.assert_called()
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
try:
    tmp_file = tempfile.mktemp()
    with open(tmp_file , 'wb' ) as f:
        http_get('https://huggingface.co/albert-base-v1/resolve/main/spiece.model' , f )
    _ = AlbertTokenizer.from_pretrained(tmp_file )
finally:
    os.remove(tmp_file )
# Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
# the current folder and have the right name.
if os.path.isfile('tokenizer.json' ):
# We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
return
try:
with open('tokenizer.json' , 'wb' ) as f:
http_get('https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json' , f )
_snake_case : List[str] = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
# The tiny random BERT has a vocab size of 1024, tiny gpt2 as a vocab size of 1000
self.assertEqual(tokenizer.vocab_size , 10_00 )
# Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
finally:
os.remove('tokenizer.json' )
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
_snake_case : Optional[Any] = AlbertTokenizer.from_pretrained('https://huggingface.co/albert-base-v1/resolve/main/spiece.model' )
@is_staging_test
class _lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
@classmethod
def UpperCamelCase_ ( cls : Tuple ):
'''simple docstring'''
cls._token = TOKEN
HfFolder.save_token(TOKEN)
@classmethod
def UpperCamelCase_ ( cls : Tuple ):
'''simple docstring'''
try:
delete_repo(token=cls._token , repo_id='test-tokenizer' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='valid_org/test-tokenizer-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='test-dynamic-tokenizer' )
except HTTPError:
pass
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
_snake_case : Tuple = os.path.join(UpperCamelCase , 'vocab.txt' )
with open(UpperCamelCase , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
_snake_case : int = BertTokenizer(UpperCamelCase )
tokenizer.push_to_hub('test-tokenizer' , use_auth_token=self._token )
_snake_case : Any = BertTokenizer.from_pretrained(f"""{USER}/test-tokenizer""" )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
# Reset repo
delete_repo(token=self._token , repo_id='test-tokenizer' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(UpperCamelCase , repo_id='test-tokenizer' , push_to_hub=UpperCamelCase , use_auth_token=self._token )
_snake_case : Optional[Any] = BertTokenizer.from_pretrained(f"""{USER}/test-tokenizer""" )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
_snake_case : int = os.path.join(UpperCamelCase , 'vocab.txt' )
with open(UpperCamelCase , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
_snake_case : Tuple = BertTokenizer(UpperCamelCase )
tokenizer.push_to_hub('valid_org/test-tokenizer-org' , use_auth_token=self._token )
_snake_case : Optional[int] = BertTokenizer.from_pretrained('valid_org/test-tokenizer-org' )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
# Reset repo
delete_repo(token=self._token , repo_id='valid_org/test-tokenizer-org' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(
UpperCamelCase , repo_id='valid_org/test-tokenizer-org' , push_to_hub=UpperCamelCase , use_auth_token=self._token )
_snake_case : int = BertTokenizer.from_pretrained('valid_org/test-tokenizer-org' )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
@require_tokenizers
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
CustomTokenizer.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
_snake_case : Dict = os.path.join(UpperCamelCase , 'vocab.txt' )
with open(UpperCamelCase , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
_snake_case : Union[str, Any] = CustomTokenizer(UpperCamelCase )
# No fast custom tokenizer
tokenizer.push_to_hub('test-dynamic-tokenizer' , use_auth_token=self._token )
_snake_case : int = AutoTokenizer.from_pretrained(f"""{USER}/test-dynamic-tokenizer""" , trust_remote_code=UpperCamelCase )
# Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , 'CustomTokenizer' )
# Fast and slow custom tokenizer
CustomTokenizerFast.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
_snake_case : Tuple = os.path.join(UpperCamelCase , 'vocab.txt' )
with open(UpperCamelCase , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
_snake_case : Union[str, Any] = BertTokenizerFast.from_pretrained(UpperCamelCase )
bert_tokenizer.save_pretrained(UpperCamelCase )
_snake_case : List[str] = CustomTokenizerFast.from_pretrained(UpperCamelCase )
tokenizer.push_to_hub('test-dynamic-tokenizer' , use_auth_token=self._token )
_snake_case : Optional[int] = AutoTokenizer.from_pretrained(f"""{USER}/test-dynamic-tokenizer""" , trust_remote_code=UpperCamelCase )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , 'CustomTokenizerFast' )
_snake_case : List[str] = AutoTokenizer.from_pretrained(
f"""{USER}/test-dynamic-tokenizer""" , use_fast=UpperCamelCase , trust_remote_code=UpperCamelCase )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , 'CustomTokenizer' )
class _lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
_snake_case : Tuple = Trie()
trie.add('Hello 友達' )
self.assertEqual(trie.data , {'H': {'e': {'l': {'l': {'o': {' ': {'友': {'達': {'': 1}}}}}}}}} )
trie.add('Hello' )
trie.data
self.assertEqual(trie.data , {'H': {'e': {'l': {'l': {'o': {'': 1, ' ': {'友': {'達': {'': 1}}}}}}}}} )
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
_snake_case : int = Trie()
self.assertEqual(trie.split('[CLS] This is a extra_id_100' ) , ['[CLS] This is a extra_id_100'] )
trie.add('[CLS]' )
trie.add('extra_id_1' )
trie.add('extra_id_100' )
self.assertEqual(trie.split('[CLS] This is a extra_id_100' ) , ['[CLS]', ' This is a ', 'extra_id_100'] )
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
_snake_case : Optional[Any] = Trie()
trie.add('A' )
self.assertEqual(trie.split('ABC' ) , ['A', 'BC'] )
self.assertEqual(trie.split('BCA' ) , ['BC', 'A'] )
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
_snake_case : List[str] = Trie()
trie.add('TOKEN]' )
trie.add('[SPECIAL_TOKEN]' )
self.assertEqual(trie.split('This is something [SPECIAL_TOKEN]' ) , ['This is something ', '[SPECIAL_TOKEN]'] )
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case : List[Any] = Trie()
trie.add('A' )
trie.add('P' )
trie.add('[SPECIAL_TOKEN]' )
self.assertEqual(trie.split('This is something [SPECIAL_TOKEN]' ) , ['This is something ', '[SPECIAL_TOKEN]'] )
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
_snake_case : List[str] = Trie()
trie.add('AB' )
trie.add('B' )
trie.add('C' )
self.assertEqual(trie.split('ABC' ) , ['AB', 'C'] )
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
_snake_case : int = Trie()
trie.add('ABC' )
trie.add('B' )
trie.add('CD' )
self.assertEqual(trie.split('ABCD' ) , ['ABC', 'D'] )
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
_snake_case : Dict = Trie()
_snake_case : Any = trie.cut_text('ABC' , [0, 0, 2, 1, 2, 3] )
self.assertEqual(UpperCamelCase , ['AB', 'C'] )
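# The Trie behaviour pinned down above, in one snippet:
#   trie = Trie()
#   trie.add("[CLS]")
#   trie.split("[CLS] hello")   # -> ["[CLS]", " hello"]
# The earliest-starting match wins, longer tokens beat their own prefixes, and
# text matching no added token is passed through unchanged.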
| 260 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class DocumentQuestionAnsweringTool(PipelineTool):
    default_checkpoint = "naver-clova-ix/donut-base-finetuned-docvqa"
    description = (
        "This is a tool that answers a question about a document (pdf). It takes an input named `document` which "
        "should be the document containing the information, as well as a `question` that is the question about the "
        "document. It returns a text that contains the answer to the question."
    )
    name = "document_qa"
    pre_processor_class = AutoProcessor
    model_class = VisionEncoderDecoderModel

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        if not is_vision_available():
            raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool.")
        super().__init__(*args, **kwargs)

    def encode(self, document: "Image", question: str):
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        prompt = task_prompt.replace("{user_input}", question)
        decoder_input_ids = self.pre_processor.tokenizer(
            prompt, add_special_tokens=False, return_tensors="pt"
        ).input_ids
        pixel_values = self.pre_processor(document, return_tensors="pt").pixel_values
        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}

    def forward(self, inputs):
        return self.model.generate(
            inputs["pixel_values"].to(self.device),
            decoder_input_ids=inputs["decoder_input_ids"].to(self.device),
            max_length=self.model.decoder.config.max_position_embeddings,
            early_stopping=True,
            pad_token_id=self.pre_processor.tokenizer.pad_token_id,
            eos_token_id=self.pre_processor.tokenizer.eos_token_id,
            use_cache=True,
            num_beams=1,
            bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]],
            return_dict_in_generate=True,
        ).sequences

    def decode(self, outputs):
        sequence = self.pre_processor.batch_decode(outputs)[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token, "")
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token, "")
        sequence = re.sub(r"<.*?>", "", sequence, count=1).strip()  # remove first task start token
        sequence = self.pre_processor.token2json(sequence)
        return sequence["answer"]
| 260 | 1 |
"""simple docstring"""
import enum
import shutil
import sys
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ : str = shutil.get_terminal_size()
SCREAMING_SNAKE_CASE_ : Dict = {'UP': 'A', 'DOWN': 'B', 'RIGHT': 'C', 'LEFT': 'D'}
class a ( enum.Enum ):
"""simple docstring"""
UpperCAmelCase = 0
UpperCAmelCase = 1
def _snake_case ( UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Any="" ):
sys.stdout.write(str(UpperCAmelCase_ ) + end )
sys.stdout.flush()
def _snake_case ( UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Union[str, Any]="" ):
forceWrite(F"""\u001b[{color}m{content}\u001b[0m""" , UpperCAmelCase_ )
def _snake_case ( ):
forceWrite("""\r""" )
def _snake_case ( UpperCAmelCase_ : int , UpperCAmelCase_ : str ):
forceWrite(F"""\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}""" )
def _snake_case ( ):
forceWrite(""" """ * TERMINAL_WIDTH )
reset_cursor()
def _snake_case ( ):
reset_cursor()
forceWrite("""-""" * TERMINAL_WIDTH )
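# Example: draw a separator, then overwrite an in-progress message in place.
#   linebreak()
#   forceWrite("working...")
#   clear_line()
#   forceWrite("done")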
| 335 |
"""simple docstring"""
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import Seq2SeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringSeq2SeqTrainer(Seq2SeqTrainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function
def UpperCamelCase ( self: Optional[Any] , UpperCamelCase: Optional[Dataset] = None , UpperCamelCase: List[Any]=None , UpperCamelCase: Optional[List[str]] = None , UpperCamelCase: str = "eval" , **UpperCamelCase: Optional[int] , ):
"""simple docstring"""
A__ = gen_kwargs.copy()
A__ = (
gen_kwargs["""max_length"""] if gen_kwargs.get("""max_length""" ) is not None else self.args.generation_max_length
)
A__ = (
gen_kwargs["""num_beams"""] if gen_kwargs.get("""num_beams""" ) is not None else self.args.generation_num_beams
)
A__ = gen_kwargs
A__ = self.eval_dataset if eval_dataset is None else eval_dataset
A__ = self.get_eval_dataloader(UpperCamelCase )
A__ = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
A__ = self.compute_metrics
A__ = None
A__ = time.time()
A__ = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
A__ = eval_loop(
UpperCamelCase , description="""Evaluation""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=UpperCamelCase , metric_key_prefix=UpperCamelCase , )
finally:
A__ = compute_metrics
A__ = self.args.eval_batch_size * self.args.world_size
if f"""{metric_key_prefix}_jit_compilation_time""" in output.metrics:
start_time += output.metrics[f"""{metric_key_prefix}_jit_compilation_time"""]
output.metrics.update(
speed_metrics(
UpperCamelCase , UpperCamelCase , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
# Only the main node write the results by default
A__ = self.post_process_function(UpperCamelCase , UpperCamelCase , UpperCamelCase )
A__ = self.compute_metrics(UpperCamelCase )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f"""{metric_key_prefix}_""" ):
A__ = metrics.pop(UpperCamelCase )
metrics.update(output.metrics )
else:
A__ = output.metrics
if self.args.should_log:
# Only the main node log the results by default
self.log(UpperCamelCase )
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
A__ = self.callback_handler.on_evaluate(self.args , self.state , self.control , UpperCamelCase )
return metrics
def UpperCamelCase ( self: List[Any] , UpperCamelCase: Dict , UpperCamelCase: List[str] , UpperCamelCase: Dict=None , UpperCamelCase: str = "test" , **UpperCamelCase: Optional[int] ):
"""simple docstring"""
A__ = gen_kwargs.copy()
A__ = self.get_test_dataloader(UpperCamelCase )
# Temporarily disable metric computation, we will do it in the loop here.
A__ = self.compute_metrics
A__ = None
A__ = time.time()
A__ = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
A__ = eval_loop(
UpperCamelCase , description="""Prediction""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=UpperCamelCase , metric_key_prefix=UpperCamelCase , )
finally:
A__ = compute_metrics
A__ = self.args.eval_batch_size * self.args.world_size
if f"""{metric_key_prefix}_jit_compilation_time""" in output.metrics:
start_time += output.metrics[f"""{metric_key_prefix}_jit_compilation_time"""]
output.metrics.update(
speed_metrics(
UpperCamelCase , UpperCamelCase , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is None or self.compute_metrics is None:
return output
A__ = self.post_process_function(UpperCamelCase , UpperCamelCase , UpperCamelCase , """predict""" )
A__ = self.compute_metrics(UpperCamelCase )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f"""{metric_key_prefix}_""" ):
A__ = metrics.pop(UpperCamelCase )
metrics.update(output.metrics )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=UpperCamelCase )
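# Hedged wiring sketch (model, args and the post-processing function are
# assumptions taken from a typical QA example script):
#
#   trainer = QuestionAnsweringSeq2SeqTrainer(
#       model=model,
#       args=training_args,
#       eval_examples=raw_eval_examples,
#       post_process_function=post_processing_function,
#   )
#   metrics = trainer.evaluate(max_length=64, num_beams=4)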
| 335 | 1 |
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class A :
def __init__( self, UpperCamelCase__, UpperCamelCase__=13, UpperCamelCase__=32, UpperCamelCase__=2, UpperCamelCase__=3, UpperCamelCase__=16, UpperCamelCase__=[32, 64, 128], UpperCamelCase__=[1, 2, 1], UpperCamelCase__=[2, 2, 4], UpperCamelCase__=2, UpperCamelCase__=2.0, UpperCamelCase__=True, UpperCamelCase__=0.0, UpperCamelCase__=0.0, UpperCamelCase__=0.1, UpperCamelCase__="gelu", UpperCamelCase__=False, UpperCamelCase__=True, UpperCamelCase__=0.02, UpperCamelCase__=1E-5, UpperCamelCase__=True, UpperCamelCase__=None, UpperCamelCase__=True, UpperCamelCase__=10, UpperCamelCase__=8, UpperCamelCase__=["stage1", "stage2"], UpperCamelCase__=[1, 2], ):
"""simple docstring"""
lowerCAmelCase_ = parent
lowerCAmelCase_ = batch_size
lowerCAmelCase_ = image_size
lowerCAmelCase_ = patch_size
lowerCAmelCase_ = num_channels
lowerCAmelCase_ = embed_dim
lowerCAmelCase_ = hidden_sizes
lowerCAmelCase_ = depths
lowerCAmelCase_ = num_heads
lowerCAmelCase_ = window_size
lowerCAmelCase_ = mlp_ratio
lowerCAmelCase_ = qkv_bias
lowerCAmelCase_ = hidden_dropout_prob
lowerCAmelCase_ = attention_probs_dropout_prob
lowerCAmelCase_ = drop_path_rate
lowerCAmelCase_ = hidden_act
lowerCAmelCase_ = use_absolute_embeddings
lowerCAmelCase_ = patch_norm
lowerCAmelCase_ = layer_norm_eps
lowerCAmelCase_ = initializer_range
lowerCAmelCase_ = is_training
lowerCAmelCase_ = scope
lowerCAmelCase_ = use_labels
lowerCAmelCase_ = type_sequence_label_size
lowerCAmelCase_ = encoder_stride
lowerCAmelCase_ = out_features
lowerCAmelCase_ = out_indices
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase_ = None
if self.use_labels:
lowerCAmelCase_ = ids_tensor([self.batch_size], self.type_sequence_label_size )
lowerCAmelCase_ = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
return FocalNetConfig(
image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, embed_dim=self.embed_dim, hidden_sizes=self.hidden_sizes, depths=self.depths, num_heads=self.num_heads, window_size=self.window_size, mlp_ratio=self.mlp_ratio, qkv_bias=self.qkv_bias, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, drop_path_rate=self.drop_path_rate, hidden_act=self.hidden_act, use_absolute_embeddings=self.use_absolute_embeddings, path_norm=self.patch_norm, layer_norm_eps=self.layer_norm_eps, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride, out_features=self.out_features, out_indices=self.out_indices, )
def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ ):
"""simple docstring"""
lowerCAmelCase_ = FocalNetModel(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
lowerCAmelCase_ = model(UpperCamelCase__ )
lowerCAmelCase_ = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
lowerCAmelCase_ = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim) )
def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ ):
"""simple docstring"""
lowerCAmelCase_ = FocalNetBackbone(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
lowerCAmelCase_ = model(UpperCamelCase__ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ), len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ), [self.batch_size, self.image_size, 8, 8] )
# verify channels
self.parent.assertEqual(len(model.channels ), len(config.out_features ) )
self.parent.assertListEqual(model.channels, config.hidden_sizes[:-1] )
# verify backbone works with out_features=None
lowerCAmelCase_ = None
lowerCAmelCase_ = FocalNetBackbone(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
lowerCAmelCase_ = model(UpperCamelCase__ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ), 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ), [self.batch_size, self.image_size * 2, 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ), 1 )
self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]] )
def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ ):
"""simple docstring"""
lowerCAmelCase_ = FocalNetForMaskedImageModeling(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
lowerCAmelCase_ = model(UpperCamelCase__ )
self.parent.assertEqual(
result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
lowerCAmelCase_ = 1
lowerCAmelCase_ = FocalNetForMaskedImageModeling(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
lowerCAmelCase_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCAmelCase_ = model(UpperCamelCase__ )
self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size) )
def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ ):
"""simple docstring"""
lowerCAmelCase_ = self.type_sequence_label_size
lowerCAmelCase_ = FocalNetForImageClassification(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
lowerCAmelCase_ = model(UpperCamelCase__, labels=UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowerCAmelCase_ = 1
lowerCAmelCase_ = FocalNetForImageClassification(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
lowerCAmelCase_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCAmelCase_ = model(UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = self.prepare_config_and_inputs()
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = config_and_inputs
lowerCAmelCase_ = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class A ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ):
__snake_case = (
(
FocalNetModel,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetBackbone,
)
if is_torch_available()
else ()
)
__snake_case = (
{'feature-extraction': FocalNetModel, 'image-classification': FocalNetForImageClassification}
if is_torch_available()
else {}
)
__snake_case = False
__snake_case = False
__snake_case = False
__snake_case = False
__snake_case = False
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = FocalNetModelTester(self )
lowerCAmelCase_ = ConfigTester(self, config_class=UpperCamelCase__, embed_dim=37, has_text_modality=UpperCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
return
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*UpperCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*UpperCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCamelCase__ )
@unittest.skip(reason='''FocalNet does not use inputs_embeds''' )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
pass
@unittest.skip(reason='''FocalNet does not use feedforward chunking''' )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ , lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
lowerCAmelCase_ = model_class(UpperCamelCase__ )
self.assertIsInstance(model.get_input_embeddings(), (nn.Module) )
lowerCAmelCase_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCamelCase__, nn.Linear ) )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ , lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
lowerCAmelCase_ = model_class(UpperCamelCase__ )
lowerCAmelCase_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase_ = [*signature.parameters.keys()]
lowerCAmelCase_ = ['''pixel_values''']
self.assertListEqual(arg_names[:1], UpperCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ ):
"""simple docstring"""
lowerCAmelCase_ = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
lowerCAmelCase_ = model(**self._prepare_for_class(UpperCamelCase__, UpperCamelCase__ ) )
lowerCAmelCase_ = outputs.hidden_states
lowerCAmelCase_ = getattr(
self.model_tester, '''expected_num_hidden_layers''', len(self.model_tester.depths ) + 1 )
self.assertEqual(len(UpperCamelCase__ ), UpperCamelCase__ )
# FocalNet has a different seq_length
lowerCAmelCase_ = (
config.patch_size
if isinstance(config.patch_size, collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowerCAmelCase_ = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ), [num_patches, self.model_tester.embed_dim], )
lowerCAmelCase_ = outputs.reshaped_hidden_states
self.assertEqual(len(UpperCamelCase__ ), UpperCamelCase__ )
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = reshaped_hidden_states[0].shape
lowerCAmelCase_ = (
reshaped_hidden_states[0].view(UpperCamelCase__, UpperCamelCase__, height * width ).permute(0, 2, 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ), [num_patches, self.model_tester.embed_dim], )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ , lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase_ = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size, collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes[:-1]:
lowerCAmelCase_ = True
self.check_hidden_states_output(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCAmelCase_ = True
self.check_hidden_states_output(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ , lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase_ = 3
lowerCAmelCase_ = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size, collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
lowerCAmelCase_ = (
config.patch_size
if isinstance(config.patch_size, collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowerCAmelCase_ = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
lowerCAmelCase_ = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes[:-1]:
lowerCAmelCase_ = True
self.check_hidden_states_output(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCAmelCase_ = True
self.check_hidden_states_output(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, (padded_height, padded_width) )
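# Padding arithmetic used above: each spatial dimension grows by
# patch_size - (size % patch_size), i.e. it is padded up to a multiple of the
# patch size (a full extra patch is added when the size is already aligned).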
@slow
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase_ = FocalNetModel.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ , lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase_ = _config_zero_init(UpperCamelCase__ )
for model_class in self.all_model_classes:
lowerCAmelCase_ = model_class(config=UpperCamelCase__ )
for name, param in model.named_parameters():
if "embeddings" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", )
@require_vision
@require_torch
class A ( unittest.TestCase ):
@cached_property
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
return AutoImageProcessor.from_pretrained('''microsoft/focalnet-tiny''' ) if is_vision_available() else None
@slow
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = FocalNetForImageClassification.from_pretrained('''microsoft/focalnet-tiny''' ).to(UpperCamelCase__ )
lowerCAmelCase_ = self.default_image_processor
lowerCAmelCase_ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
lowerCAmelCase_ = image_processor(images=UpperCamelCase__, return_tensors='''pt''' ).to(UpperCamelCase__ )
# forward pass
with torch.no_grad():
lowerCAmelCase_ = model(**UpperCamelCase__ )
# verify the logits
lowerCAmelCase_ = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape, UpperCamelCase__ )
lowerCAmelCase_ = torch.tensor([0.2_166, -0.4_368, 0.2_191] ).to(UpperCamelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3], UpperCamelCase__, atol=1E-4 ) )
self.assertEqual(outputs.logits.argmax(dim=-1 ).item(), 281 )
@require_torch
class A ( __UpperCAmelCase , unittest.TestCase ):
__snake_case = (FocalNetBackbone,) if is_torch_available() else ()
__snake_case = FocalNetConfig
__snake_case = False
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = FocalNetModelTester(self )
| 167 |
import string


def decrypt(message: str) -> None:
    """Brute-force a Caesar cipher by printing the candidate plaintext for every key."""
    for key in range(len(string.ascii_uppercase)):
        translated = ""
        for symbol in message:
            if symbol in string.ascii_uppercase:
                num = string.ascii_uppercase.find(symbol)
                num = num - key
                if num < 0:
                    num = num + len(string.ascii_uppercase)
                translated = translated + string.ascii_uppercase[num]
            else:
                translated = translated + symbol
        print(f"Decryption using Key #{key}: {translated}")


def main() -> None:
    message = input("Encrypted message: ")
    message = message.upper()
    decrypt(message)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
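# Example: decrypt("WKLV LV D WHVW") prints all 26 candidates, including
#   Decryption using Key #3: THIS IS A TEST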
| 167 | 1 |
def solution(pence: int = 200) -> int:
    """Count the ways `pence` can be made from standard British coins (dynamic programming)."""
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # base case: 1 way to make 0 pence
    for coin in coins:
        for i in range(coin, pence + 1):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]


if __name__ == "__main__":
    assert solution(200) == 73682
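# Example: solution(5) == 4 -> {5}, {2+2+1}, {2+1+1+1}, {1+1+1+1+1}.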
| 56 |
'''simple docstring'''
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
a : int = abspath(join(dirname(__file__), 'src'))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
def __magic_name__ ( __UpperCAmelCase ) -> Optional[int]:
'''simple docstring'''
config.addinivalue_line(
'''markers''', '''is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested''' )
config.addinivalue_line(
'''markers''', '''is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested''' )
config.addinivalue_line('''markers''', '''is_pipeline_test: mark test to run only when pipelines are tested''' )
config.addinivalue_line('''markers''', '''is_staging_test: mark test to run only in the staging environment''' )
config.addinivalue_line('''markers''', '''accelerate_tests: mark test that require accelerate''' )
config.addinivalue_line('''markers''', '''tool_tests: mark the tool tests that are run on their specific schedule''' )
def __magic_name__ ( __UpperCAmelCase ) -> int:
'''simple docstring'''
from transformers.testing_utils import pytest_addoption_shared
pytest_addoption_shared(__UpperCAmelCase )
def pytest_terminal_summary(terminalreporter):
    '''simple docstring'''
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption('--make-reports')
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
def pytest_sessionfinish(session, exitstatus):
    '''simple docstring'''
    # If no tests are collected, pytest exits with code 5; reset it so the run is not marked failed.
    if exitstatus == 5:
        session.exitstatus = 0
# Doctest custom flag to ignore output.
IGNORE_RESULT = doctest.register_optionflag('IGNORE_RESULT')

OutputChecker = doctest.OutputChecker


class CustomOutputChecker(OutputChecker):
    def check_output(self, want, got, optionflags):
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self, want, got, optionflags)


doctest.OutputChecker = CustomOutputChecker
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser
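# How the flag registered above is used in practice (illustrative doctest):
#
#     >>> import random
#     >>> random.random()  # doctest: +IGNORE_RESULT
#     0.5
#
# The custom checker accepts any output for lines carrying the flag instead of
# comparing it against the expected value.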
| 56 | 1 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
HubertConfig,
HubertForCTC,
HubertModel,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
logging,
)
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
        f' {value.shape} for {full_name}'
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "hubert." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key

                if key in name or (key.split("w2v_model.")[-1] == name.split(".")[0] and not is_finetuned):
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "bias" in name:
                        weight_type = "bias"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f'Unused weights: {unused_weights}')
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f'{full_name} has size {value.shape}, but'
                f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.')
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f'{full_name} has size {value.shape}, but'
                f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.')
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f'{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.')
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f'{full_name} has size {value.shape}, but'
                f' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.')
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_hubert_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    if config_path is not None:
        config = HubertConfig.from_pretrained(config_path)
    else:
        config = HubertConfig()

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(target_dict.indices, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wav2vec = HubertForCTC(config)
    else:
        hf_wav2vec = HubertModel(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec, is_finetuned)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
SCREAMING_SNAKE_CASE_ = parser.parse_args()
convert_hubert_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
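# Example invocation, assuming this script is saved as convert_hubert_checkpoint.py
# (the paths below are placeholders); the flags match the parser defined above:
#
#   python convert_hubert_checkpoint.py \
#       --checkpoint_path /path/to/hubert_base_ls960.pt \
#       --pytorch_dump_folder_path /path/to/output \
#       --config_path /path/to/config.json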
| 356 |
from __future__ import annotations
class IIRFilter:
    def __init__(self, order: int) -> None:
        '''simple docstring'''
        self.order = order

        # a_{0} ... a_{k}
        self.a_coeffs = [1.0] + [0.0] * order
        # b_{0} ... b_{k}
        self.b_coeffs = [1.0] + [0.0] * order

        # x[n-1] ... x[n-k]
        self.input_history = [0.0] * self.order
        # y[n-1] ... y[n-k]
        self.output_history = [0.0] * self.order

    def set_coefficients(self, a_coeffs: list[float], b_coeffs: list[float]) -> None:
        '''simple docstring'''
        if len(a_coeffs) < self.order:
            a_coeffs = [1.0, *a_coeffs]

        if len(a_coeffs) != self.order + 1:
            error_msg = (
                f'Expected a_coeffs to have {self.order + 1} elements '
                f'for {self.order}-order filter, got {len(a_coeffs)}'
            )
            raise ValueError(error_msg)

        if len(b_coeffs) != self.order + 1:
            error_msg = (
                f'Expected b_coeffs to have {self.order + 1} elements '
                f'for {self.order}-order filter, got {len(b_coeffs)}'
            )
            raise ValueError(error_msg)

        self.a_coeffs = a_coeffs
        self.b_coeffs = b_coeffs

    def process(self, sample: float) -> float:
        '''simple docstring'''
        result = 0.0

        # Start at index 1 and do index 0 at the end.
        for i in range(1, self.order + 1):
            result += (
                self.b_coeffs[i] * self.input_history[i - 1]
                - self.a_coeffs[i] * self.output_history[i - 1]
            )

        result = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]

        self.input_history[1:] = self.input_history[:-1]
        self.output_history[1:] = self.output_history[:-1]
        self.input_history[0] = sample
        self.output_history[0] = result
        return result
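# A short usage sketch, assuming the class above; the coefficients are
# illustrative (a normalized first-order low-pass), not derived from any design.
if __name__ == "__main__":
    lowpass = IIRFilter(1)
    lowpass.set_coefficients([1.0, -0.9], [0.05, 0.05])
    print([round(lowpass.process(1.0), 4) for _ in range(5)])  # step response ramps toward 1.0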
| 189 | 0 |
'''simple docstring'''
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class snake_case :
"""simple docstring"""
def __init__( self : List[Any] , __A : List[Any] , __A : Any=1_3 , __A : Any=2 , __A : Tuple=2_4 , __A : Dict=1_6 , __A : List[str]=True , __A : List[Any]=True , __A : Any=3_2 , __A : List[str]=5 , __A : Optional[int]=4 , __A : List[str]=3_7 , __A : Optional[Any]="gelu" , __A : Optional[int]=0.1 , __A : Any=0.1 , __A : Union[str, Any]=1_0 , __A : List[Any]=0.02 , __A : Union[str, Any]=None , __A : Tuple=2 , __A : Any=2 , ):
__UpperCamelCase = parent
__UpperCamelCase = batch_size
__UpperCamelCase = patch_size
__UpperCamelCase = max_length
__UpperCamelCase = num_mel_bins
__UpperCamelCase = is_training
__UpperCamelCase = use_labels
__UpperCamelCase = hidden_size
__UpperCamelCase = num_hidden_layers
__UpperCamelCase = num_attention_heads
__UpperCamelCase = intermediate_size
__UpperCamelCase = hidden_act
__UpperCamelCase = hidden_dropout_prob
__UpperCamelCase = attention_probs_dropout_prob
__UpperCamelCase = type_sequence_label_size
__UpperCamelCase = initializer_range
__UpperCamelCase = scope
__UpperCamelCase = frequency_stride
__UpperCamelCase = time_stride
# in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
__UpperCamelCase = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
__UpperCamelCase = (self.max_length - self.patch_size) // self.time_stride + 1
__UpperCamelCase = frequency_out_dimension * time_out_dimension
__UpperCamelCase = num_patches + 2
def _lowerCamelCase ( self : Optional[int] ):
__UpperCamelCase = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins] )
__UpperCamelCase = None
if self.use_labels:
__UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCamelCase = self.get_config()
return config, input_values, labels
def _lowerCamelCase ( self : Dict ):
return ASTConfig(
patch_size=self.patch_size , max_length=self.max_length , num_mel_bins=self.num_mel_bins , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__A , initializer_range=self.initializer_range , frequency_stride=self.frequency_stride , time_stride=self.time_stride , )
def _lowerCamelCase ( self : Optional[Any] , __A : Any , __A : str , __A : str ):
__UpperCamelCase = ASTModel(config=__A )
model.to(__A )
model.eval()
__UpperCamelCase = model(__A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCamelCase ( self : Any ):
__UpperCamelCase = self.prepare_config_and_inputs()
(
(
__UpperCamelCase
) , (
__UpperCamelCase
) , (
__UpperCamelCase
) ,
) = config_and_inputs
__UpperCamelCase = {'input_values': input_values}
return config, inputs_dict
@require_torch
class snake_case ( __lowerCamelCase , __lowerCamelCase , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] =(
(
ASTModel,
ASTForAudioClassification,
)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE_ : int =(
{"audio-classification": ASTForAudioClassification, "feature-extraction": ASTModel}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE_ : Optional[Any] =False
SCREAMING_SNAKE_CASE_ : str =False
SCREAMING_SNAKE_CASE_ : str =False
SCREAMING_SNAKE_CASE_ : Union[str, Any] =False
def _lowerCamelCase ( self : Tuple , __A : int , __A : Optional[Any] , __A : List[str] , __A : Optional[Any] , __A : List[Any] ):
if pipeline_test_casse_name == "AudioClassificationPipelineTests":
return True
return False
def _lowerCamelCase ( self : Union[str, Any] ):
__UpperCamelCase = ASTModelTester(self )
__UpperCamelCase = ConfigTester(self , config_class=__A , has_text_modality=__A , hidden_size=3_7 )
def _lowerCamelCase ( self : Union[str, Any] ):
self.config_tester.run_common_tests()
@unittest.skip(reason='AST does not use inputs_embeds' )
def _lowerCamelCase ( self : Tuple ):
pass
def _lowerCamelCase ( self : str ):
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCamelCase = model_class(__A )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__UpperCamelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__A , nn.Linear ) )
def _lowerCamelCase ( self : Optional[int] ):
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCamelCase = model_class(__A )
__UpperCamelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCamelCase = [*signature.parameters.keys()]
__UpperCamelCase = ['input_values']
self.assertListEqual(arg_names[:1] , __A )
def _lowerCamelCase ( self : Dict ):
__UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__A )
@slow
def _lowerCamelCase ( self : Dict ):
for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCamelCase = ASTModel.from_pretrained(__A )
self.assertIsNotNone(__A )
def lowercase__ ( ) -> Optional[int]:
"""simple docstring"""
__UpperCamelCase = hf_hub_download(
repo_id='nielsr/audio-spectogram-transformer-checkpoint' , filename='sample_audio.flac' , repo_type='dataset' )
__UpperCamelCase , __UpperCamelCase = torchaudio.load(__lowercase )
return audio, sampling_rate
@require_torch
@require_torchaudio
class snake_case ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def _lowerCamelCase ( self : int ):
return (
ASTFeatureExtractor.from_pretrained('MIT/ast-finetuned-audioset-10-10-0.4593' )
if is_torchaudio_available()
else None
)
@slow
def _lowerCamelCase ( self : Optional[int] ):
__UpperCamelCase = self.default_feature_extractor
__UpperCamelCase = ASTForAudioClassification.from_pretrained('MIT/ast-finetuned-audioset-10-10-0.4593' ).to(__A )
__UpperCamelCase = self.default_feature_extractor
__UpperCamelCase , __UpperCamelCase = prepare_audio()
__UpperCamelCase = audio.squeeze().numpy()
__UpperCamelCase = feature_extractor(__A , sampling_rate=__A , return_tensors='pt' ).to(__A )
# forward pass
with torch.no_grad():
__UpperCamelCase = model(**__A )
# verify the logits
__UpperCamelCase = torch.Size((1, 5_2_7) )
self.assertEqual(outputs.logits.shape , __A )
__UpperCamelCase = torch.tensor([-0.8760, -7.0042, -8.6602] ).to(__A )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __A , atol=1e-4 ) )
| 53 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class DocumentQuestionAnsweringTool(PipelineTool):
    default_checkpoint = "naver-clova-ix/donut-base-finetuned-docvqa"
    description = (
        "This is a tool that answers a question about an document (pdf). It takes an input named `document` which "
        "should be the document containing the information, as well as a `question` that is the question about the "
        "document. It returns a text that contains the answer to the question."
    )
    name = "document_qa"
    pre_processor_class = AutoProcessor
    model_class = VisionEncoderDecoderModel

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        if not is_vision_available():
            raise ValueError('Pillow must be installed to use the DocumentQuestionAnsweringTool.')
        super().__init__(*args, **kwargs)

    def encode(self, document: "Image", question: str):
        task_prompt = '<s_docvqa><s_question>{user_input}</s_question><s_answer>'
        prompt = task_prompt.replace('{user_input}', question)
        decoder_input_ids = self.pre_processor.tokenizer(
            prompt, add_special_tokens=False, return_tensors='pt').input_ids
        pixel_values = self.pre_processor(document, return_tensors='pt').pixel_values

        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}

    def forward(self, inputs):
        return self.model.generate(
            inputs['pixel_values'].to(self.device),
            decoder_input_ids=inputs['decoder_input_ids'].to(self.device),
            max_length=self.model.decoder.config.max_position_embeddings,
            early_stopping=True,
            pad_token_id=self.pre_processor.tokenizer.pad_token_id,
            eos_token_id=self.pre_processor.tokenizer.eos_token_id,
            use_cache=True,
            num_beams=1,
            bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]],
            return_dict_in_generate=True,
        ).sequences

    def decode(self, outputs):
        sequence = self.pre_processor.batch_decode(outputs)[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token, '')
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token, '')
        sequence = re.sub(r'<.*?>', '', sequence, count=1).strip()  # remove first task start token
        sequence = self.pre_processor.token2json(sequence)

        return sequence["answer"]
| 53 | 1 |
'''simple docstring'''
from __future__ import annotations
from dataclasses import dataclass
@dataclass
class TreeNode:
    '''simple docstring'''

    data: float
    left: TreeNode | None = None
    right: TreeNode | None = None


def is_binary_search_tree(tree: TreeNode | None) -> bool:
    # Validation
    def is_valid_tree(node: TreeNode | None) -> bool:
        if node is None:
            return True
        if not isinstance(node, TreeNode):
            return False
        try:
            float(node.data)
        except (TypeError, ValueError):
            return False
        return is_valid_tree(node.left) and is_valid_tree(node.right)

    if not is_valid_tree(tree):
        raise ValueError(
            "Each node should be type of TreeNode and data should be float.")

    def is_binary_search_tree_recursive_check(
        node: TreeNode | None, left_bound: float, right_bound: float
    ) -> bool:
        if node is None:
            return True
        return (
            left_bound < node.data < right_bound
            and is_binary_search_tree_recursive_check(node.left, left_bound, node.data)
            and is_binary_search_tree_recursive_check(node.right, node.data, right_bound)
        )

    return is_binary_search_tree_recursive_check(tree, -float("inf"), float("inf"))
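# Quick sanity checks (illustrative), using the node type above: a left child
# larger than its parent must violate the bound check.
assert is_binary_search_tree(TreeNode(2.0, TreeNode(1.0), TreeNode(3.0)))
assert not is_binary_search_tree(TreeNode(2.0, TreeNode(3.0), TreeNode(1.0)))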
if __name__ == "__main__":
import doctest
doctest.testmod()
| 364 |
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class DDIMPipeline(DiffusionPipeline):
    '''simple docstring'''

    def __init__(self, unet, scheduler):
        """simple docstring"""
        super().__init__()

        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)

        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        use_clipped_model_output: Optional[bool] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        """simple docstring"""
        if isinstance(self.unet.config.sample_size, int):
            image_shape = (
                batch_size,
                self.unet.config.in_channels,
                self.unet.config.sample_size,
                self.unet.config.sample_size,
            )
        else:
            image_shape = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f'You have passed a list of generators of length {len(generator)}, but requested an effective batch'
                f' size of {batch_size}. Make sure the batch size matches the length of the generators.')

        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output, t, image, eta=eta, use_clipped_model_output=use_clipped_model_output, generator=generator
            ).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
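# A hedged usage sketch (the model id is an assumption drawn from common
# diffusers examples, not from this file):
#
#   pipe = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32")
#   image = pipe(num_inference_steps=50, eta=0.0).images[0]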
| 229 | 0 |
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def __init__( self,__lowerCamelCase,__lowerCamelCase=7,__lowerCamelCase=3,__lowerCamelCase=18,__lowerCamelCase=30,__lowerCamelCase=400,__lowerCamelCase=True,__lowerCamelCase=None,__lowerCamelCase=True,__lowerCamelCase=[0.5, 0.5, 0.5],__lowerCamelCase=[0.5, 0.5, 0.5],):
A__ = size if size is not None else {'''height''': 18, '''width''': 18}
A__ = parent
A__ = batch_size
A__ = num_channels
A__ = image_size
A__ = min_resolution
A__ = max_resolution
A__ = do_resize
A__ = size
A__ = do_normalize
A__ = image_mean
A__ = image_std
def UpperCamelCase ( self ):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase__ , unittest.TestCase ):
__SCREAMING_SNAKE_CASE = DPTImageProcessor if is_vision_available() else None
def UpperCamelCase ( self ):
A__ = DPTImageProcessingTester(self )
@property
def UpperCamelCase ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase ( self ):
A__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__lowerCamelCase,'''image_mean''' ) )
self.assertTrue(hasattr(__lowerCamelCase,'''image_std''' ) )
self.assertTrue(hasattr(__lowerCamelCase,'''do_normalize''' ) )
self.assertTrue(hasattr(__lowerCamelCase,'''do_resize''' ) )
self.assertTrue(hasattr(__lowerCamelCase,'''size''' ) )
def UpperCamelCase ( self ):
A__ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size,{'''height''': 18, '''width''': 18} )
A__ = self.image_processing_class.from_dict(self.image_processor_dict,size=42 )
self.assertEqual(image_processor.size,{'''height''': 42, '''width''': 42} )
def UpperCamelCase ( self ):
# Initialize image_processing
A__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
A__ = prepare_image_inputs(self.image_processor_tester,equal_resolution=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase,Image.Image )
# Test not batched input
A__ = image_processing(image_inputs[0],return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
),)
# Test batched
A__ = image_processing(__lowerCamelCase,return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
),)
def UpperCamelCase ( self ):
# Initialize image_processing
A__ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
A__ = prepare_image_inputs(self.image_processor_tester,equal_resolution=__lowerCamelCase,numpify=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase,np.ndarray )
# Test not batched input
A__ = image_processing(image_inputs[0],return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
),)
# Test batched
A__ = image_processing(__lowerCamelCase,return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
),)
def UpperCamelCase ( self ):
# Initialize image_processing
A__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
A__ = prepare_image_inputs(self.image_processor_tester,equal_resolution=__lowerCamelCase,torchify=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase,torch.Tensor )
# Test not batched input
A__ = image_processing(image_inputs[0],return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
),)
# Test batched
A__ = image_processing(__lowerCamelCase,return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
),)
| 193 |
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase__ ):
__SCREAMING_SNAKE_CASE = (DDIMParallelScheduler,)
__SCREAMING_SNAKE_CASE = (('''eta''', 0.0), ('''num_inference_steps''', 50))
def UpperCamelCase ( self,**__lowerCamelCase ):
A__ = {
'''num_train_timesteps''': 1000,
'''beta_start''': 0.0001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
'''clip_sample''': True,
}
config.update(**__lowerCamelCase )
return config
def UpperCamelCase ( self,**__lowerCamelCase ):
A__ = self.scheduler_classes[0]
A__ = self.get_scheduler_config(**__lowerCamelCase )
A__ = scheduler_class(**__lowerCamelCase )
A__ , A__ = 10, 0.0
A__ = self.dummy_model()
A__ = self.dummy_sample_deter
scheduler.set_timesteps(__lowerCamelCase )
for t in scheduler.timesteps:
A__ = model(__lowerCamelCase,__lowerCamelCase )
A__ = scheduler.step(__lowerCamelCase,__lowerCamelCase,__lowerCamelCase,__lowerCamelCase ).prev_sample
return sample
def UpperCamelCase ( self ):
for timesteps in [100, 500, 1000]:
self.check_over_configs(num_train_timesteps=__lowerCamelCase )
def UpperCamelCase ( self ):
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=__lowerCamelCase )
A__ = self.scheduler_classes[0]
A__ = self.get_scheduler_config(steps_offset=1 )
A__ = scheduler_class(**__lowerCamelCase )
scheduler.set_timesteps(5 )
assert torch.equal(scheduler.timesteps,torch.LongTensor([801, 601, 401, 201, 1] ) )
def UpperCamelCase ( self ):
for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1],[0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=__lowerCamelCase,beta_end=__lowerCamelCase )
def UpperCamelCase ( self ):
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=__lowerCamelCase )
def UpperCamelCase ( self ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=__lowerCamelCase )
def UpperCamelCase ( self ):
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=__lowerCamelCase )
def UpperCamelCase ( self ):
for timestep_spacing in ["trailing", "leading"]:
self.check_over_configs(timestep_spacing=__lowerCamelCase )
def UpperCamelCase ( self ):
for rescale_betas_zero_snr in [True, False]:
self.check_over_configs(rescale_betas_zero_snr=__lowerCamelCase )
def UpperCamelCase ( self ):
self.check_over_configs(thresholding=__lowerCamelCase )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(
thresholding=__lowerCamelCase,prediction_type=__lowerCamelCase,sample_max_value=__lowerCamelCase,)
def UpperCamelCase ( self ):
for t in [1, 10, 49]:
self.check_over_forward(time_step=__lowerCamelCase )
def UpperCamelCase ( self ):
for t, num_inference_steps in zip([1, 10, 50],[10, 50, 500] ):
self.check_over_forward(time_step=__lowerCamelCase,num_inference_steps=__lowerCamelCase )
def UpperCamelCase ( self ):
for t, eta in zip([1, 10, 49],[0.0, 0.5, 1.0] ):
self.check_over_forward(time_step=__lowerCamelCase,eta=__lowerCamelCase )
def UpperCamelCase ( self ):
A__ = self.scheduler_classes[0]
A__ = self.get_scheduler_config()
A__ = scheduler_class(**__lowerCamelCase )
assert torch.sum(torch.abs(scheduler._get_variance(0,0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(420,400 ) - 0.14771 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(980,960 ) - 0.32460 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(0,0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487,486 ) - 0.00979 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999,998 ) - 0.02 ) ) < 1E-5
def UpperCamelCase ( self ):
A__ = self.scheduler_classes[0]
A__ = self.get_scheduler_config()
A__ = scheduler_class(**__lowerCamelCase )
A__ , A__ = 10, 0.0
scheduler.set_timesteps(__lowerCamelCase )
A__ = self.dummy_model()
A__ = self.dummy_sample_deter
A__ = self.dummy_sample_deter + 0.1
A__ = self.dummy_sample_deter - 0.1
A__ = samplea.shape[0]
A__ = torch.stack([samplea, samplea, samplea],dim=0 )
A__ = torch.arange(__lowerCamelCase )[0:3, None].repeat(1,__lowerCamelCase )
A__ = model(samples.flatten(0,1 ),timesteps.flatten(0,1 ) )
A__ = scheduler.batch_step_no_noise(__lowerCamelCase,timesteps.flatten(0,1 ),samples.flatten(0,1 ),__lowerCamelCase )
A__ = torch.sum(torch.abs(__lowerCamelCase ) )
A__ = torch.mean(torch.abs(__lowerCamelCase ) )
assert abs(result_sum.item() - 1147.7904 ) < 1E-2
assert abs(result_mean.item() - 0.4982 ) < 1E-3
def UpperCamelCase ( self ):
A__ = self.full_loop()
A__ = torch.sum(torch.abs(__lowerCamelCase ) )
A__ = torch.mean(torch.abs(__lowerCamelCase ) )
assert abs(result_sum.item() - 172.0067 ) < 1E-2
assert abs(result_mean.item() - 0.223967 ) < 1E-3
def UpperCamelCase ( self ):
A__ = self.full_loop(prediction_type='''v_prediction''' )
A__ = torch.sum(torch.abs(__lowerCamelCase ) )
A__ = torch.mean(torch.abs(__lowerCamelCase ) )
assert abs(result_sum.item() - 52.5302 ) < 1E-2
assert abs(result_mean.item() - 0.0684 ) < 1E-3
def UpperCamelCase ( self ):
# We specify different beta, so that the first alpha is 0.99
A__ = self.full_loop(set_alpha_to_one=__lowerCamelCase,beta_start=0.01 )
A__ = torch.sum(torch.abs(__lowerCamelCase ) )
A__ = torch.mean(torch.abs(__lowerCamelCase ) )
assert abs(result_sum.item() - 149.8295 ) < 1E-2
assert abs(result_mean.item() - 0.1951 ) < 1E-3
def UpperCamelCase ( self ):
# We specify different beta, so that the first alpha is 0.99
A__ = self.full_loop(set_alpha_to_one=__lowerCamelCase,beta_start=0.01 )
A__ = torch.sum(torch.abs(__lowerCamelCase ) )
A__ = torch.mean(torch.abs(__lowerCamelCase ) )
assert abs(result_sum.item() - 149.0784 ) < 1E-2
assert abs(result_mean.item() - 0.1941 ) < 1E-3
| 193 | 1 |
'''simple docstring'''
def binomial_coefficient(n: int, k: int) -> int:
    """Compute C(n, k) with integer arithmetic."""
    result = 1  # to keep the calculated value
    # Since C(n, k) = C(n, n-k)
    if k > (n - k):
        k = n - k
    # Calculate C(n, k)
    for i in range(k):
        result *= n - i
        result //= i + 1
    return result


def catalan_number(node_count: int) -> int:
    """Return the nth Catalan number."""
    return binomial_coefficient(2 * node_count, node_count) // (node_count + 1)


def factorial(n: int) -> int:
    """Return n! for non-negative n."""
    if n < 0:
        raise ValueError('factorial() not defined for negative values')
    result = 1
    for i in range(1, n + 1):
        result *= i
    return result


def binary_tree_count(node_count: int) -> int:
    """Return the number of distinct labeled binary trees on node_count nodes."""
    return catalan_number(node_count) * factorial(node_count)
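# Worked check for the helpers above: with 3 nodes, C(3) = binomial(6, 3) / 4 = 5
# binary search trees exist, and 5 * 3! = 30 distinct labeled binary trees.
assert catalan_number(3) == 5
assert binary_tree_count(3) == 30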
if __name__ == "__main__":
UpperCamelCase : Tuple = int(input("""Enter the number of nodes: """).strip() or 0)
if node_count <= 0:
raise ValueError("""We need some nodes to work with.""")
print(
f'''Given {node_count} nodes, there are {binary_tree_count(node_count)} '''
f'''binary trees and {catalan_number(node_count)} binary search trees.'''
)
| 362 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_pegasus_x": ["PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP", "PegasusXConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_pegasus_x"] = [
        "PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PegasusXForConditionalGeneration",
        "PegasusXModel",
        "PegasusXPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 345 | 0 |
"""simple docstring"""
import math
import unittest
def is_prime(number: int) -> bool:
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
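# Why stepping over 6k +/- 1 suffices: any n can be written 6k + r with r in 0..5;
# 6k, 6k + 2 and 6k + 4 are even and 6k + 3 is divisible by 3, so any prime
# above 3 must leave remainder 1 or 5, i.e. have the form 6k +/- 1.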
class Test(unittest.TestCase):
    def test_primes(self) -> None:
        self.assertTrue(is_prime(2))
        self.assertTrue(is_prime(3))
        self.assertTrue(is_prime(5))
        self.assertTrue(is_prime(7))
        self.assertTrue(is_prime(11))
        self.assertTrue(is_prime(13))
        self.assertTrue(is_prime(17))
        self.assertTrue(is_prime(19))
        self.assertTrue(is_prime(23))
        self.assertTrue(is_prime(29))

    def test_not_primes(self) -> None:
        with self.assertRaises(AssertionError):
            is_prime(-19)
        self.assertFalse(
            is_prime(0), "Zero doesn't have any positive factors, primes must have exactly two.")
        self.assertFalse(
            is_prime(1), "One only has 1 positive factor, primes must have exactly two.")
        self.assertFalse(is_prime(2 * 2))
        self.assertFalse(is_prime(2 * 3))
        self.assertFalse(is_prime(3 * 3))
        self.assertFalse(is_prime(3 * 5))
        self.assertFalse(is_prime(3 * 5 * 7))
if __name__ == "__main__":
unittest.main()
| 183 |
"""simple docstring"""
from __future__ import annotations
def prime_sieve(limit: int) -> list[int]:
    is_prime = [True] * limit
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(limit**0.5 + 1), 2):
        index = i * 2
        while index < limit:
            is_prime[index] = False
            index = index + i

    primes = [2]
    for i in range(3, limit, 2):
        if is_prime[i]:
            primes.append(i)
    return primes
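# Sanity check for the sieve above (it returns the primes strictly below the limit):
assert prime_sieve(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]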
def solution(ceiling: int = 1000000) -> int:
    primes = prime_sieve(ceiling)
    length = 0
    largest = 0

    for i in range(len(primes)):
        for j in range(i + length, len(primes)):
            sol = sum(primes[i:j])
            if sol >= ceiling:
                break
            if sol in primes:
                length = j - i
                largest = sol
    return largest
if __name__ == "__main__":
print(F'''{solution() = }''')
| 183 | 1 |
__version__ = "2.13.1"
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("""3.7"""):
raise ImportWarning(
"""To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."""
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
"""To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"""
"""If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."""
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
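# With the exports above in place, typical usage looks like (dataset id illustrative):
#
#   import datasets
#   ds = datasets.load_dataset("imdb", split="train")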
| 362 |
from __future__ import annotations
END = "#"


class Trie:
    '''simple docstring'''

    def __init__(self) -> None:
        '''simple docstring'''
        self._trie: dict = {}

    def insert_word(self, text: str) -> None:
        '''simple docstring'''
        trie = self._trie
        for char in text:
            if char not in trie:
                trie[char] = {}
            trie = trie[char]
        trie[END] = True

    def find_word(self, prefix: str) -> tuple | list:
        '''simple docstring'''
        trie = self._trie
        for char in prefix:
            if char in trie:
                trie = trie[char]
            else:
                return []
        return self._elements(trie)

    def _elements(self, d: dict) -> tuple:
        '''simple docstring'''
        result = []
        for c, v in d.items():
            sub_result = [' '] if c == END else [(c + s) for s in self._elements(v)]
            result.extend(sub_result)
        return tuple(result)


trie = Trie()
words = ("depart", "detergent", "daring", "dog", "deer", "deal")
for word in words:
    trie.insert_word(word)


def autocomplete_using_trie(string: str) -> tuple:
    suffixes = trie.find_word(string)
    return tuple(string + word for word in suffixes)


def main() -> None:
    print(autocomplete_using_trie('de'))
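# Note on the output shape: each terminal marker contributes the single-space
# suffix produced in _elements, so the call above prints completions with a
# trailing space, e.g. ('depart ', 'detergent ', 'deer ', 'deal ') for the
# word list above.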
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 260 | 0 |