import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/mbart-large-50-one-to-many-mmt": (
            "https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt/resolve/main/sentencepiece.bpe.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/mbart-large-50-one-to-many-mmt": 1024,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN", "af_ZA", "az_AZ", "bn_IN", "fa_IR", "he_IL", "hr_HR", "id_ID", "ka_GE", "km_KH", "mk_MK", "ml_IN", "mn_MN", "mr_IN", "pl_PL", "ps_AF", "pt_XX", "sv_SE", "sw_KE", "ta_IN", "te_IN", "th_TH", "tl_XX", "uk_UA", "ur_PK", "xh_ZA", "gl_ES", "sl_SI"]
# fmt: on


class MBart50Tokenizer(PreTrainedTokenizer):
    """Construct a MBart50 tokenizer, based on SentencePiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        src_lang=None,
        tgt_lang=None,
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
        kwargs["additional_special_tokens"] += [
            code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"]
        ]

        super().__init__(
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |  7  |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | --- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-' | '▁a'

        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        self.sp_model_size = len(self.sp_model)
        self.lang_code_to_id = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES)
        }
        self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset

        self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    @property
    def vocab_size(self) -> int:
        return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1  # Plus 1 for the mask token

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d: Dict) -> None:
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def get_vocab(self) -> Dict:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        """Converts a token (str) into an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)

        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (integer) into a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings) into a single string."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def get_special_tokens_mask(
        self,
        token_ids_0: List[int],
        token_ids_1: Optional[List[int]] = None,
        already_has_special_tokens: bool = False,
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def _build_translation_inputs(self, raw_inputs, return_tensors, src_lang, tgt_lang, **extra_kwargs):
        """Used by the translation pipeline to prepare inputs for the generate function."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "en_XX",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "ro_RO",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang: str) -> None:
        """Reset the special tokens to the source language setting: prefix=[src_lang_code], suffix=[eos]."""
        self.cur_lang_code_id = self.lang_code_to_id[src_lang]
        self.prefix_tokens = [self.cur_lang_code_id]
        self.suffix_tokens = [self.eos_token_id]

    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        """Reset the special tokens to the target language setting: prefix=[tgt_lang_code], suffix=[eos]."""
        self.cur_lang_code_id = self.lang_code_to_id[tgt_lang]
        self.prefix_tokens = [self.cur_lang_code_id]
        self.suffix_tokens = [self.eos_token_id]
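

# A minimal usage sketch for the tokenizer above, assuming the checkpoint named in
# PRETRAINED_VOCAB_FILES_MAP is downloadable; the sentence and language codes are
# illustrative only.
if __name__ == "__main__":
    from transformers import MBart50Tokenizer as _Tok

    _tokenizer = _Tok.from_pretrained(
        "facebook/mbart-large-50-one-to-many-mmt", src_lang="en_XX", tgt_lang="ro_RO"
    )
    _encoded = _tokenizer("UN Chief says there is no military solution in Syria")
    # Per set_src_lang_special_tokens above, input_ids begin with the en_XX
    # language-code id and end with the </s> (eos) token id.
    print(_encoded["input_ids"])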
from typing import Union

from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_torch_available():
    from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class VisualQuestionAnsweringPipeline(Pipeline):
    """Visual Question Answering pipeline using a `AutoModelForVisualQuestionAnswering`."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING)

    def _sanitize_parameters(self, top_k=None, padding=None, truncation=None, **kwargs):
        preprocess_params, postprocess_params = {}, {}
        if padding is not None:
            preprocess_params["padding"] = padding
        if truncation is not None:
            preprocess_params["truncation"] = truncation
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__(self, image: Union["Image.Image", str], question: str = None, **kwargs):
        # A single (image, question) pair is wrapped into a dict; anything else
        # (dict, list of dicts, generator, dataset) is forwarded as-is.
        if isinstance(image, (Image.Image, str)) and isinstance(question, str):
            inputs = {"image": image, "question": question}
        else:
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def preprocess(self, inputs, padding=False, truncation=False):
        image = load_image(inputs["image"])
        model_inputs = self.tokenizer(
            inputs["question"], return_tensors=self.framework, padding=padding, truncation=truncation
        )
        image_features = self.image_processor(images=image, return_tensors=self.framework)
        model_inputs.update(image_features)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.sigmoid()[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "answer": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
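

# A minimal usage sketch for the pipeline above, assuming the public
# "dandelin/vilt-b32-finetuned-vqa" checkpoint is available; the image path and
# question are illustrative only.
if __name__ == "__main__":
    from transformers import pipeline

    vqa = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
    preds = vqa(image="./my_image.png", question="How many cats are there?", top_k=2)
    # Each prediction is a {"score": ..., "answer": ...} dict, as built in postprocess().
    print(preds)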
import contextlib
import csv
import json
import os
import sqlite3
import tarfile
import textwrap
import zipfile

import pyarrow as pa
import pyarrow.parquet as pq
import pytest

import datasets
import datasets.config


@pytest.fixture(scope="session")
def dataset():
    n = 10
    features = datasets.Features(
        {
            "tokens": datasets.Sequence(datasets.Value("string")),
            "labels": datasets.Sequence(datasets.ClassLabel(names=["negative", "positive"])),
            "answers": datasets.Sequence(
                {
                    "text": datasets.Value("string"),
                    "answer_start": datasets.Value("int32"),
                }
            ),
            "id": datasets.Value("int64"),
        }
    )
    dataset = datasets.Dataset.from_dict(
        {
            "tokens": [["foo"] * 5] * n,
            "labels": [[1] * 5] * n,
            "answers": [{"answer_start": [97], "text": ["1976"]}] * 10,
            "id": list(range(n)),
        },
        features=features,
    )
    return dataset


@pytest.fixture(scope="session")
def arrow_file(tmp_path_factory, dataset):
    filename = str(tmp_path_factory.mktemp("data") / "file.arrow")
    dataset.map(cache_file_name=filename)
    return filename


# FILE_CONTENT + files


FILE_CONTENT = """\
    Text data.
    Second line of data."""


@pytest.fixture(scope="session")
def text_file(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.txt"
    data = FILE_CONTENT
    with open(filename, "w") as f:
        f.write(data)
    return filename


@pytest.fixture(scope="session")
def bz2_file(tmp_path_factory):
    import bz2

    path = tmp_path_factory.mktemp("data") / "file.txt.bz2"
    data = bytes(FILE_CONTENT, "utf-8")
    with bz2.open(path, "wb") as f:
        f.write(data)
    return path


@pytest.fixture(scope="session")
def gz_file(tmp_path_factory):
    import gzip

    path = str(tmp_path_factory.mktemp("data") / "file.txt.gz")
    data = bytes(FILE_CONTENT, "utf-8")
    with gzip.open(path, "wb") as f:
        f.write(data)
    return path


@pytest.fixture(scope="session")
def lz4_file(tmp_path_factory):
    if datasets.config.LZ4_AVAILABLE:
        import lz4.frame

        path = tmp_path_factory.mktemp("data") / "file.txt.lz4"
        data = bytes(FILE_CONTENT, "utf-8")
        with lz4.frame.open(path, "wb") as f:
            f.write(data)
        return path


@pytest.fixture(scope="session")
def seven_zip_file(tmp_path_factory, text_file):
    if datasets.config.PY7ZR_AVAILABLE:
        import py7zr

        path = tmp_path_factory.mktemp("data") / "file.txt.7z"
        with py7zr.SevenZipFile(path, "w") as archive:
            archive.write(text_file, arcname=os.path.basename(text_file))
        return path


@pytest.fixture(scope="session")
def tar_file(tmp_path_factory, text_file):
    path = tmp_path_factory.mktemp("data") / "file.txt.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(text_file, arcname=os.path.basename(text_file))
    return path


@pytest.fixture(scope="session")
def xz_file(tmp_path_factory):
    import lzma

    path = tmp_path_factory.mktemp("data") / "file.txt.xz"
    data = bytes(FILE_CONTENT, "utf-8")
    with lzma.open(path, "wb") as f:
        f.write(data)
    return path


@pytest.fixture(scope="session")
def zip_file(tmp_path_factory, text_file):
    path = tmp_path_factory.mktemp("data") / "file.txt.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(text_file, arcname=os.path.basename(text_file))
    return path


@pytest.fixture(scope="session")
def zstd_file(tmp_path_factory):
    if datasets.config.ZSTANDARD_AVAILABLE:
        import zstandard as zstd

        path = tmp_path_factory.mktemp("data") / "file.txt.zst"
        data = bytes(FILE_CONTENT, "utf-8")
        with zstd.open(path, "wb") as f:
            f.write(data)
        return path


@pytest.fixture(scope="session")
def xml_file(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.xml"
    data = textwrap.dedent(
        """\
    <?xml version="1.0" encoding="UTF-8" ?>
    <tmx version="1.4">
      <header segtype="sentence" srclang="ca" />
      <body>
        <tu>
          <tuv xml:lang="ca"><seg>Contingut 1</seg></tuv>
          <tuv xml:lang="en"><seg>Content 1</seg></tuv>
        </tu>
        <tu>
          <tuv xml:lang="ca"><seg>Contingut 2</seg></tuv>
          <tuv xml:lang="en"><seg>Content 2</seg></tuv>
        </tu>
        <tu>
          <tuv xml:lang="ca"><seg>Contingut 3</seg></tuv>
          <tuv xml:lang="en"><seg>Content 3</seg></tuv>
        </tu>
        <tu>
          <tuv xml:lang="ca"><seg>Contingut 4</seg></tuv>
          <tuv xml:lang="en"><seg>Content 4</seg></tuv>
        </tu>
        <tu>
          <tuv xml:lang="ca"><seg>Contingut 5</seg></tuv>
          <tuv xml:lang="en"><seg>Content 5</seg></tuv>
        </tu>
      </body>
    </tmx>"""
    )
    with open(filename, "w") as f:
        f.write(data)
    return filename


DATA = [
    {"col_1": "0", "col_2": 0, "col_3": 0.0},
    {"col_1": "1", "col_2": 1, "col_3": 1.0},
    {"col_1": "2", "col_2": 2, "col_3": 2.0},
    {"col_1": "3", "col_2": 3, "col_3": 3.0},
]
DATA2 = [
    {"col_1": "4", "col_2": 4, "col_3": 4.0},
    {"col_1": "5", "col_2": 5, "col_3": 5.0},
]
DATA_DICT_OF_LISTS = {
    "col_1": ["0", "1", "2", "3"],
    "col_2": [0, 1, 2, 3],
    "col_3": [0.0, 1.0, 2.0, 3.0],
}
DATA_312 = [
    {"col_3": 0.0, "col_1": "0", "col_2": 0},
    {"col_3": 1.0, "col_1": "1", "col_2": 1},
]
DATA_STR = [
    {"col_1": "s0", "col_2": 0, "col_3": 0.0},
    {"col_1": "s1", "col_2": 1, "col_3": 1.0},
    {"col_1": "s2", "col_2": 2, "col_3": 2.0},
    {"col_1": "s3", "col_2": 3, "col_3": 3.0},
]


@pytest.fixture(scope="session")
def dataset_dict():
    return DATA_DICT_OF_LISTS


@pytest.fixture(scope="session")
def arrow_path(tmp_path_factory):
    dataset = datasets.Dataset.from_dict(DATA_DICT_OF_LISTS)
    path = str(tmp_path_factory.mktemp("data") / "dataset.arrow")
    dataset.map(cache_file_name=path)
    return path


@pytest.fixture(scope="session")
def sqlite_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.sqlite")
    with contextlib.closing(sqlite3.connect(path)) as con:
        cur = con.cursor()
        cur.execute("CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)")
        for item in DATA:
            cur.execute("INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)", tuple(item.values()))
        con.commit()
    return path


@pytest.fixture(scope="session")
def csv_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.csv")
    with open(path, "w", newline="") as f:
        writer = csv.DictWriter(f, fieldnames=["col_1", "col_2", "col_3"])
        writer.writeheader()
        for item in DATA:
            writer.writerow(item)
    return path


@pytest.fixture(scope="session")
def csv2_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset2.csv")
    with open(path, "w", newline="") as f:
        writer = csv.DictWriter(f, fieldnames=["col_1", "col_2", "col_3"])
        writer.writeheader()
        for item in DATA:
            writer.writerow(item)
    return path


@pytest.fixture(scope="session")
def bz2_csv_path(csv_path, tmp_path_factory):
    import bz2

    path = tmp_path_factory.mktemp("data") / "dataset.csv.bz2"
    with open(csv_path, "rb") as f:
        data = f.read()
    # data = bytes(FILE_CONTENT, "utf-8")
    with bz2.open(path, "wb") as f:
        f.write(data)
    return path


@pytest.fixture(scope="session")
def zip_csv_path(csv_path, csv2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.csv.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(csv_path, arcname=os.path.basename(csv_path))
        f.write(csv2_path, arcname=os.path.basename(csv2_path))
    return path


@pytest.fixture(scope="session")
def zip_uppercase_csv_path(csv_path, csv2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.csv.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(csv_path, arcname=os.path.basename(csv_path.replace(".csv", ".CSV")))
        f.write(csv2_path, arcname=os.path.basename(csv2_path.replace(".csv", ".CSV")))
    return path


@pytest.fixture(scope="session")
def zip_csv_with_dir_path(csv_path, csv2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset_with_dir.csv.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(csv_path, arcname=os.path.join("main_dir", os.path.basename(csv_path)))
        f.write(csv2_path, arcname=os.path.join("main_dir", os.path.basename(csv2_path)))
    return path


@pytest.fixture(scope="session")
def parquet_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.parquet")
    schema = pa.schema(
        {
            "col_1": pa.string(),
            "col_2": pa.int64(),
            "col_3": pa.float64(),
        }
    )
    with open(path, "wb") as f:
        writer = pq.ParquetWriter(f, schema=schema)
        pa_table = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(DATA))] for k in DATA[0]}, schema=schema)
        writer.write_table(pa_table)
        writer.close()
    return path


@pytest.fixture(scope="session")
def json_list_of_dicts_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.json")
    data = {"data": DATA}
    with open(path, "w") as f:
        json.dump(data, f)
    return path


@pytest.fixture(scope="session")
def json_dict_of_lists_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.json")
    data = {"data": DATA_DICT_OF_LISTS}
    with open(path, "w") as f:
        json.dump(data, f)
    return path


@pytest.fixture(scope="session")
def jsonl_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.jsonl")
    with open(path, "w") as f:
        for item in DATA:
            f.write(json.dumps(item) + "\n")
    return path


@pytest.fixture(scope="session")
def jsonl2_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset2.jsonl")
    with open(path, "w") as f:
        for item in DATA:
            f.write(json.dumps(item) + "\n")
    return path


@pytest.fixture(scope="session")
def jsonl_312_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset_312.jsonl")
    with open(path, "w") as f:
        for item in DATA_312:
            f.write(json.dumps(item) + "\n")
    return path


@pytest.fixture(scope="session")
def jsonl_str_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset-str.jsonl")
    with open(path, "w") as f:
        for item in DATA_STR:
            f.write(json.dumps(item) + "\n")
    return path


@pytest.fixture(scope="session")
def text_gz_path(tmp_path_factory, text_path):
    import gzip

    path = str(tmp_path_factory.mktemp("data") / "dataset.txt.gz")
    with open(text_path, "rb") as orig_file:
        with gzip.open(path, "wb") as zipped_file:
            zipped_file.writelines(orig_file)
    return path


@pytest.fixture(scope="session")
def jsonl_gz_path(tmp_path_factory, jsonl_path):
    import gzip

    path = str(tmp_path_factory.mktemp("data") / "dataset.jsonl.gz")
    with open(jsonl_path, "rb") as orig_file:
        with gzip.open(path, "wb") as zipped_file:
            zipped_file.writelines(orig_file)
    return path


@pytest.fixture(scope="session")
def zip_jsonl_path(jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.jsonl.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(jsonl_path, arcname=os.path.basename(jsonl_path))
        f.write(jsonl2_path, arcname=os.path.basename(jsonl2_path))
    return path


@pytest.fixture(scope="session")
def zip_nested_jsonl_path(zip_jsonl_path, jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset_nested.jsonl.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(zip_jsonl_path, arcname=os.path.join("nested", os.path.basename(zip_jsonl_path)))
    return path


@pytest.fixture(scope="session")
def zip_jsonl_with_dir_path(jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset_with_dir.jsonl.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(jsonl_path, arcname=os.path.join("main_dir", os.path.basename(jsonl_path)))
        f.write(jsonl2_path, arcname=os.path.join("main_dir", os.path.basename(jsonl2_path)))
    return path


@pytest.fixture(scope="session")
def tar_jsonl_path(jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.jsonl.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(jsonl_path, arcname=os.path.basename(jsonl_path))
        f.add(jsonl2_path, arcname=os.path.basename(jsonl2_path))
    return path


@pytest.fixture(scope="session")
def tar_nested_jsonl_path(tar_jsonl_path, jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset_nested.jsonl.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(tar_jsonl_path, arcname=os.path.join("nested", os.path.basename(tar_jsonl_path)))
    return path


@pytest.fixture(scope="session")
def text_path(tmp_path_factory):
    data = ["0", "1", "2", "3"]
    path = str(tmp_path_factory.mktemp("data") / "dataset.txt")
    with open(path, "w") as f:
        for item in data:
            f.write(item + "\n")
    return path


@pytest.fixture(scope="session")
def text2_path(tmp_path_factory):
    data = ["0", "1", "2", "3"]
    path = str(tmp_path_factory.mktemp("data") / "dataset2.txt")
    with open(path, "w") as f:
        for item in data:
            f.write(item + "\n")
    return path


@pytest.fixture(scope="session")
def abc_path(tmp_path_factory):
    data = ["0", "1", "2", "3"]
    path = tmp_path_factory.mktemp("data") / "dataset.abc"
    with open(path, "w") as f:
        for item in data:
            f.write(item + "\n")
    return path


@pytest.fixture(scope="session")
def zip_text_path(text_path, text2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.text.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(text_path, arcname=os.path.basename(text_path))
        f.write(text2_path, arcname=os.path.basename(text2_path))
    return path


@pytest.fixture(scope="session")
def zip_text_with_dir_path(text_path, text2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset_with_dir.text.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(text_path, arcname=os.path.join("main_dir", os.path.basename(text_path)))
        f.write(text2_path, arcname=os.path.join("main_dir", os.path.basename(text2_path)))
    return path


@pytest.fixture(scope="session")
def zip_unsupported_ext_path(text_path, text2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.ext.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(text_path, arcname=os.path.basename("unsupported.ext"))
        f.write(text2_path, arcname=os.path.basename("unsupported_2.ext"))
    return path


@pytest.fixture(scope="session")
def text_path_with_unicode_new_lines(tmp_path_factory):
    text = "\n".join(["First", "Second\u2029with Unicode new line", "Third"])
    path = str(tmp_path_factory.mktemp("data") / "dataset_with_unicode_new_lines.txt")
    with open(path, "w", encoding="utf-8") as f:
        f.write(text)
    return path


@pytest.fixture(scope="session")
def image_file():
    return os.path.join("tests", "features", "data", "test_image_rgb.jpg")


@pytest.fixture(scope="session")
def audio_file():
    return os.path.join("tests", "features", "data", "test_audio_44100.wav")


@pytest.fixture(scope="session")
def zip_image_path(image_file, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.img.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(image_file, arcname=os.path.basename(image_file))
        f.write(image_file, arcname=os.path.basename(image_file).replace(".jpg", "2.jpg"))
    return path


@pytest.fixture(scope="session")
def data_dir_with_hidden_files(tmp_path_factory):
    data_dir = tmp_path_factory.mktemp("data_dir")

    (data_dir / "subdir").mkdir()
    with open(data_dir / "subdir" / "train.txt", "w") as f:
        f.write("foo\n" * 10)
    with open(data_dir / "subdir" / "test.txt", "w") as f:
        f.write("bar\n" * 10)
    # hidden file
    with open(data_dir / "subdir" / ".test.txt", "w") as f:
        f.write("bar\n" * 10)
    # hidden directory
    (data_dir / ".subdir").mkdir()
    with open(data_dir / ".subdir" / "train.txt", "w") as f:
        f.write("foo\n" * 10)
    with open(data_dir / ".subdir" / "test.txt", "w") as f:
        f.write("bar\n" * 10)

    return data_dir
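

# Sketch of how a test consumes one of these session-scoped fixtures (hypothetical
# test; it would live in a test module, not in this fixtures file):
#
# def test_csv_fixture_has_expected_header(csv_path):
#     with open(csv_path, newline="") as f:
#         assert f.readline().strip() == "col_1,col_2,col_3"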
import os

# Precomputes a list of the 100 first triangular numbers
TRIANGULAR_NUMBERS = [int(0.5 * n * (n + 1)) for n in range(1, 101)]


def solution():
    """
    Finds the amount of triangular words in the words file.
    """
    script_dir = os.path.dirname(os.path.realpath(__file__))
    words_file_path = os.path.join(script_dir, "words.txt")

    words = ""
    with open(words_file_path) as f:
        words = f.readline()

    # Strip the quotes, split on commas, then score each word by summing its
    # letter values (A=1, ..., Z=26) and keep only the triangular scores.
    words = [word.strip('"') for word in words.strip("\r\n").split(",")]
    words = [
        word
        for word in [sum(ord(x) - 64 for x in word) for word in words]
        if word in TRIANGULAR_NUMBERS
    ]
    return len(words)


if __name__ == "__main__":
    print(solution())
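
# Worked example (from the Project Euler 42 statement, not recomputed here): the
# word "SKY" scores 19 + 11 + 25 = 55, and 55 = 10 * 11 / 2 is the tenth
# triangular number, so "SKY" counts as a triangle word.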
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/vivit-b-16x2-kinetics400": (
        "https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json"
    ),
    # See all Vivit models at https://huggingface.co/models?filter=vivit
}


class VivitConfig(PretrainedConfig):
    """Configuration class to store the configuration of a ViViT model."""

    model_type = "vivit"

    def __init__(
        self,
        image_size=224,
        num_frames=32,
        tubelet_size=[2, 16, 16],
        num_channels=3,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu_fast",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        qkv_bias=True,
        **kwargs,
    ):
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.image_size = image_size
        self.num_frames = num_frames
        self.tubelet_size = tubelet_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        super().__init__(**kwargs)
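

# A minimal usage sketch: instantiating the config defined above with a couple of
# overridden values (illustrative only).
if __name__ == "__main__":
    config = VivitConfig(num_frames=16, hidden_dropout_prob=0.1)
    print(config.model_type, config.hidden_size, config.num_frames)  # vivit 768 16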
import importlib
import shutil
import threading
import warnings
from typing import List

import fsspec
import fsspec.asyn

from . import compression
from .hffilesystem import HfFileSystem


_has_s3fs = importlib.util.find_spec("s3fs") is not None

if _has_s3fs:
    from .s3filesystem import S3FileSystem  # noqa: F401

COMPRESSION_FILESYSTEMS: List = [
    compression.Bz2FileSystem,
    compression.GzipFileSystem,
    compression.Lz4FileSystem,
    compression.XzFileSystem,
    compression.ZstdFileSystem,
]

# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
    if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
        warnings.warn(f"A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.")
    fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)


def extract_path_from_uri(dataset_path: str) -> str:
    """Strips the remote filesystem prefix (e.g. `s3://`) from `dataset_path`."""
    if "://" in dataset_path:
        dataset_path = dataset_path.split("://")[1]
    return dataset_path


def is_remote_filesystem(fs: fsspec.AbstractFileSystem) -> bool:
    """Checks whether `fs` is a remote (non-local) filesystem."""
    if fs is not None and fs.protocol != "file":
        return True
    else:
        return False


def rename(fs: fsspec.AbstractFileSystem, src: str, dst: str):
    """Renames the file `src` in `fs` to `dst`."""
    is_local = not is_remote_filesystem(fs)
    if is_local:
        # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
        shutil.move(fs._strip_protocol(src), fs._strip_protocol(dst))
    else:
        fs.mv(src, dst, recursive=True)


def _reset_fsspec_lock() -> None:
    """Clear the reference to fsspec's event loop and IO thread."""
    if hasattr(fsspec.asyn, "reset_lock"):
        # for future fsspec>2022.05.0
        fsspec.asyn.reset_lock()
    else:
        fsspec.asyn.iothread[0] = None
        fsspec.asyn.loop[0] = None
        fsspec.asyn.lock = threading.Lock()
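

# Behavior sketch for extract_path_from_uri (the outputs follow directly from the
# string-splitting logic above; the URIs themselves are illustrative):
#
# extract_path_from_uri("s3://my-bucket/datasets/squad")  -> "my-bucket/datasets/squad"
# extract_path_from_uri("/local/path/datasets/squad")     -> "/local/path/datasets/squad"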
from itertools import count


def solution(min_block_length: int = 50) -> int:
    """
    Returns, for the given minimum block length, the least n for which the
    fill-count function first exceeds one million.
    """
    fill_count_functions = [1] * min_block_length

    for n in count(min_block_length):
        fill_count_functions.append(1)

        for block_length in range(min_block_length, n + 1):
            for block_start in range(n - block_length):
                fill_count_functions[n] += fill_count_functions[
                    n - block_start - block_length - 1
                ]

            fill_count_functions[n] += 1

        if fill_count_functions[n] > 1_000_000:
            break

    return n


if __name__ == "__main__":
    print(f"{solution() = }")
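
# Expected values from the Project Euler 115 statement (assumed, not recomputed
# here): the fill-count function first exceeds one million at n = 30 for a
# minimum block length of 3, and at n = 57 for a minimum block length of 10.
#
# solution(3)   # -> 30
# solution(10)  # -> 57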
def cocktail_shaker_sort(unsorted: list) -> list:
    """
    Sorts a list in place using the cocktail shaker sort algorithm: a forward
    bubble pass followed by a backward pass, stopping early when no swaps occur.
    """
    for i in range(len(unsorted) - 1, 0, -1):
        swapped = False

        # backward pass: sink the smallest remaining element to the front
        for j in range(i, 0, -1):
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j], unsorted[j - 1] = unsorted[j - 1], unsorted[j]
                swapped = True

        # forward pass: bubble the largest remaining element to the back
        for j in range(i):
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j], unsorted[j + 1] = unsorted[j + 1], unsorted[j]
                swapped = True

        if not swapped:
            break
    return unsorted


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(f"{cocktail_shaker_sort(unsorted) = }")
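
# Example runs (illustrative inputs):
#
# >>> cocktail_shaker_sort([4, 5, 2, 1, 2])
# [1, 2, 2, 4, 5]
# >>> cocktail_shaker_sort([-4, 5, 0, 1, 2, 11])
# [-4, 0, 1, 2, 5, 11]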
# Lint as: python3
"""Utilities for dataset file names."""
import itertools
import os
import re


_uppercase_uppercase_re = re.compile(r"([A-Z]+)([A-Z][a-z])")
_lowercase_uppercase_re = re.compile(r"([a-z\d])([A-Z])")

_single_underscore_re = re.compile(r"(?<!_)_(?!_)")
_multiple_underscores_re = re.compile(r"(_{2,})")

_split_re = r"^\w+(\.\w+)*$"

INVALID_WINDOWS_CHARACTERS_IN_PATH = r"<>:/\|?*"


def camelcase_to_snakecase(name):
    """Convert camel-case string to snake-case."""
    name = _uppercase_uppercase_re.sub(r"\1_\2", name)
    name = _lowercase_uppercase_re.sub(r"\1_\2", name)
    return name.lower()


def snakecase_to_camelcase(name):
    """Convert snake-case string to camel-case string."""
    name = _single_underscore_re.split(name)
    name = [_multiple_underscores_re.split(n) for n in name]
    return "".join(n.capitalize() for n in itertools.chain.from_iterable(name) if n != "")


def filename_prefix_for_name(name):
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    return camelcase_to_snakecase(name)


def filename_prefix_for_split(name, split):
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    if not re.match(_split_re, split):
        raise ValueError(f"Split name should match '{_split_re}'' but got '{split}'.")
    return f"{filename_prefix_for_name(name)}-{split}"


def filepattern_for_dataset_split(dataset_name, split, data_dir, filetype_suffix=None):
    prefix = filename_prefix_for_split(dataset_name, split)
    if filetype_suffix:
        prefix += f".{filetype_suffix}"
    filepath = os.path.join(data_dir, prefix)
    return f"{filepath}*"


def filenames_for_dataset_split(path, dataset_name, split, filetype_suffix=None, shard_lengths=None):
    prefix = filename_prefix_for_split(dataset_name, split)
    prefix = os.path.join(path, prefix)

    if shard_lengths:
        num_shards = len(shard_lengths)
        filenames = [f"{prefix}-{shard_id:05d}-of-{num_shards:05d}" for shard_id in range(num_shards)]
        if filetype_suffix:
            filenames = [filename + f".{filetype_suffix}" for filename in filenames]
        return filenames
    else:
        filename = prefix
        if filetype_suffix:
            filename += f".{filetype_suffix}"
        return [filename]
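

# Worked examples (illustrative names; the outputs follow from the regexes and
# formatting logic above):
#
# camelcase_to_snakecase("SquadV2")            -> "squad_v2"
# snakecase_to_camelcase("squad_v2")           -> "SquadV2"
# filename_prefix_for_split("Squad", "train")  -> "squad-train"
# filenames_for_dataset_split("/data", "Squad", "train", "arrow", shard_lengths=[100, 100])
#   -> ["/data/squad-train-00000-of-00002.arrow", "/data/squad-train-00001-of-00002.arrow"]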
from typing import List, Optional, Union

import numpy as np
import PIL
import torch
from PIL import Image

from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
    is_accelerate_available,
    is_accelerate_version,
    logging,
    randn_tensor,
    replace_example_docstring,
)


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = """
    Examples:
        ```py
        >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline
        >>> from diffusers.utils import load_image
        >>> import torch

        >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
        ...     "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        ... )
        >>> pipe_prior.to("cuda")

        >>> prompt = "A red cartoon frog, 4k"
        >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)

        >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(
        ...     "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
        ... )
        >>> pipe.to("cuda")

        >>> init_image = load_image(
        ...     "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
        ...     "/kandinsky/frog.png"
        ... )

        >>> image = pipe(
        ...     image=init_image,
        ...     image_embeds=image_emb,
        ...     negative_image_embeds=zero_image_emb,
        ...     height=768,
        ...     width=768,
        ...     num_inference_steps=100,
        ...     strength=0.2,
        ... ).images

        >>> image[0].save("red_frog.png")
        ```
"""


def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor


def prepare_image(pil_image, w=512, h=512):
    pil_image = pil_image.resize((w, h), resample=Image.BICUBIC, reducing_gap=1)
    arr = np.array(pil_image.convert("RGB"))
    arr = arr.astype(np.float32) / 127.5 - 1
    arr = np.transpose(arr, [2, 0, 1])
    image = torch.from_numpy(arr).unsqueeze(0)
    return image


class KandinskyV22Img2ImgPipeline(DiffusionPipeline):
    """Image-to-image generation pipeline using Kandinsky 2.2."""

    def __init__(
        self,
        unet: UNet2DConditionModel,
        scheduler: DDPMScheduler,
        movq: VQModel,
    ):
        super().__init__()
        self.register_modules(
            unet=unet,
            scheduler=scheduler,
            movq=movq,
        )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)

        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]

        return timesteps, num_inference_steps - t_start

    def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None):
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
            )

        image = image.to(device=device, dtype=dtype)

        batch_size = batch_size * num_images_per_prompt

        if image.shape[1] == 4:
            init_latents = image
        else:
            if isinstance(generator, list) and len(generator) != batch_size:
                raise ValueError(
                    f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                    f" size of {batch_size}. Make sure the batch size matches the length of the generators."
                )
            elif isinstance(generator, list):
                init_latents = [
                    self.movq.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
                ]
                init_latents = torch.cat(init_latents, dim=0)
            else:
                init_latents = self.movq.encode(image).latent_dist.sample(generator)

            init_latents = self.movq.config.scaling_factor * init_latents

        init_latents = torch.cat([init_latents], dim=0)

        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)

        # get latents
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)

        latents = init_latents

        return latents

    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device

    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        image: Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]],
        negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 100,
        guidance_scale: float = 4.0,
        strength: float = 0.3,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        device = self._execution_device

        do_classifier_free_guidance = guidance_scale > 1.0

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        batch_size = image_embeds.shape[0]
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)

            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(
                dtype=self.unet.dtype, device=device
            )

        if not isinstance(image, list):
            image = [image]
        if not all(isinstance(i, (PIL.Image.Image, torch.Tensor)) for i in image):
            raise ValueError(
                f"Input is in incorrect format: {[type(i) for i in image]}. Currently, we only support PIL image and pytorch tensor"
            )

        image = torch.cat([prepare_image(i, width, height) for i in image], dim=0)
        image = image.to(dtype=image_embeds.dtype, device=device)

        latents = self.movq.encode(image)["latents"]
        latents = latents.repeat_interleave(num_images_per_prompt, dim=0)
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device)
        latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)
        latents = self.prepare_latents(
            latents, latent_timestep, batch_size, num_images_per_prompt, image_embeds.dtype, device, generator
        )
        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {"image_embeds": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=None,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred,
                t,
                latents,
                generator=generator,
            )[0]

        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

            if output_type == "pil":
                image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
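

# Numeric check for downscale_height_and_width (arithmetic only; EXAMPLE_DOC_STRING
# above already shows end-to-end pipeline usage):
#
# downscale_height_and_width(768, 768, scale_factor=8) -> (96, 96)
# since 768 // 8**2 = 12 whole blocks, and 12 * 8 = 96 latent pixels per side.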
import itertools
import json
import os
import unittest
from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowerCamelCase (_snake_case , unittest.TestCase ):
'''simple docstring'''
_snake_case : int = RobertaTokenizer
_snake_case : Any = RobertaTokenizerFast
_snake_case : Optional[int] = True
_snake_case : List[Any] = {'''cls_token''': '''<s>'''}
def __UpperCAmelCase ( self ) -> int:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
UpperCAmelCase_ : List[Any] = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
UpperCAmelCase_ : Union[str, Any] = dict(zip(_UpperCamelCase , range(len(_UpperCamelCase ) ) ) )
UpperCAmelCase_ : Tuple = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
UpperCAmelCase_ : Union[str, Any] = {'unk_token': '<unk>'}
UpperCAmelCase_ : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
UpperCAmelCase_ : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(_UpperCamelCase ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(_UpperCamelCase ) )
def __UpperCAmelCase ( self , **_UpperCamelCase ) -> str:
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **_UpperCamelCase )
def __UpperCAmelCase ( self , **_UpperCamelCase ) -> Optional[int]:
kwargs.update(self.special_tokens_map )
return RobertaTokenizerFast.from_pretrained(self.tmpdirname , **_UpperCamelCase )
def __UpperCAmelCase ( self , _UpperCamelCase ) -> Any:
UpperCAmelCase_ : List[Any] = 'lower newer'
UpperCAmelCase_ : Optional[int] = 'lower newer'
return input_text, output_text
def __UpperCAmelCase ( self ) -> int:
UpperCAmelCase_ : Dict = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
UpperCAmelCase_ : Optional[Any] = 'lower newer'
UpperCAmelCase_ : Optional[Any] = ['l', 'o', 'w', 'er', '\u0120', 'n', 'e', 'w', 'er']
UpperCAmelCase_ : Optional[int] = tokenizer.tokenize(_UpperCamelCase ) # , add_prefix_space=True)
self.assertListEqual(_UpperCamelCase , _UpperCamelCase )
UpperCAmelCase_ : Tuple = tokens + [tokenizer.unk_token]
UpperCAmelCase_ : Optional[Any] = [0, 1, 2, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCamelCase ) , _UpperCamelCase )
def __UpperCAmelCase ( self ) -> List[str]:
UpperCAmelCase_ : Dict = self.get_tokenizer()
self.assertListEqual(tokenizer.encode('Hello world!' , add_special_tokens=_UpperCamelCase ) , [0, 3_1_4_1_4, 2_3_2, 3_2_8, 2] )
self.assertListEqual(
tokenizer.encode('Hello world! cécé herlolip 418' , add_special_tokens=_UpperCamelCase ) , [0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2] , )
@slow
def __UpperCAmelCase ( self ) -> Any:
UpperCAmelCase_ : List[Any] = self.tokenizer_class.from_pretrained('roberta-base' )
UpperCAmelCase_ : str = tokenizer.encode('sequence builders' , add_special_tokens=_UpperCamelCase )
UpperCAmelCase_ : Optional[int] = tokenizer.encode('multi-sequence build' , add_special_tokens=_UpperCamelCase )
UpperCAmelCase_ : Optional[int] = tokenizer.encode(
'sequence builders' , add_special_tokens=_UpperCamelCase , add_prefix_space=_UpperCamelCase )
UpperCAmelCase_ : Tuple = tokenizer.encode(
'sequence builders' , 'multi-sequence build' , add_special_tokens=_UpperCamelCase , add_prefix_space=_UpperCamelCase )
UpperCAmelCase_ : int = tokenizer.build_inputs_with_special_tokens(_UpperCamelCase )
UpperCAmelCase_ : Dict = tokenizer.build_inputs_with_special_tokens(_UpperCamelCase , _UpperCamelCase )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def __UpperCAmelCase ( self ) -> Optional[Any]:
UpperCAmelCase_ : Optional[Any] = self.get_tokenizer()
UpperCAmelCase_ : Any = 'Encode this sequence.'
UpperCAmelCase_ : int = tokenizer.byte_encoder[' '.encode('utf-8' )[0]]
# Testing encoder arguments
UpperCAmelCase_ : Optional[Any] = tokenizer.encode(_UpperCamelCase , add_special_tokens=_UpperCamelCase , add_prefix_space=_UpperCamelCase )
UpperCAmelCase_ : int = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(_UpperCamelCase , _UpperCamelCase )
UpperCAmelCase_ : str = tokenizer.encode(_UpperCamelCase , add_special_tokens=_UpperCamelCase , add_prefix_space=_UpperCamelCase )
UpperCAmelCase_ : Optional[int] = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
tokenizer.add_special_tokens({'bos_token': '<s>'} )
UpperCAmelCase_ : List[Any] = tokenizer.encode(_UpperCamelCase , add_special_tokens=_UpperCamelCase )
UpperCAmelCase_ : Union[str, Any] = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(_UpperCamelCase , _UpperCamelCase )
# Testing spaces after special tokens
UpperCAmelCase_ : Any = '<mask>'
tokenizer.add_special_tokens(
{'mask_token': AddedToken(_UpperCamelCase , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase )} ) # mask token has a left space
UpperCAmelCase_ : Union[str, Any] = tokenizer.convert_tokens_to_ids(_UpperCamelCase )
UpperCAmelCase_ : Union[str, Any] = 'Encode <mask> sequence'
UpperCAmelCase_ : Union[str, Any] = 'Encode <mask>sequence'
UpperCAmelCase_ : int = tokenizer.encode(_UpperCamelCase )
UpperCAmelCase_ : Any = encoded.index(_UpperCamelCase )
UpperCAmelCase_ : str = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
UpperCAmelCase_ : Dict = tokenizer.encode(_UpperCamelCase )
UpperCAmelCase_ : List[Any] = encoded.index(_UpperCamelCase )
UpperCAmelCase_ : Dict = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(_UpperCamelCase , _UpperCamelCase )
def __UpperCAmelCase ( self ) -> int:
pass
def __UpperCAmelCase ( self ) -> str:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
UpperCAmelCase_ : int = self.rust_tokenizer_class.from_pretrained(_UpperCamelCase , **_UpperCamelCase )
UpperCAmelCase_ : List[Any] = self.tokenizer_class.from_pretrained(_UpperCamelCase , **_UpperCamelCase )
UpperCAmelCase_ : Any = 'A, <mask> AllenNLP sentence.'
UpperCAmelCase_ : Optional[int] = tokenizer_r.encode_plus(_UpperCamelCase , add_special_tokens=_UpperCamelCase , return_token_type_ids=_UpperCamelCase )
UpperCAmelCase_ : Optional[int] = tokenizer_p.encode_plus(_UpperCamelCase , add_special_tokens=_UpperCamelCase , return_token_type_ids=_UpperCamelCase )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['token_type_ids'] ) , sum(tokens_p['token_type_ids'] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r['attention_mask'] ) / len(tokens_r['attention_mask'] ) , sum(tokens_p['attention_mask'] ) / len(tokens_p['attention_mask'] ) , )
UpperCAmelCase_ : List[str] = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'] )
UpperCAmelCase_ : Optional[int] = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'] )
                # Rust correctly handles the space before the mask while Python doesn't
self.assertSequenceEqual(tokens_p['input_ids'] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] )
self.assertSequenceEqual(tokens_r['input_ids'] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] )
self.assertSequenceEqual(
_UpperCamelCase , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
self.assertSequenceEqual(
_UpperCamelCase , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
def __UpperCAmelCase ( self ) -> Union[str, Any]:
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
UpperCAmelCase_ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=_UpperCamelCase , add_prefix_space=_UpperCamelCase , trim_offsets=_UpperCamelCase )
UpperCAmelCase_ : Optional[Any] = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
UpperCAmelCase_ : str = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state['add_prefix_space'] , _UpperCamelCase )
self.assertEqual(post_processor_state['add_prefix_space'] , _UpperCamelCase )
self.assertEqual(post_processor_state['trim_offsets'] , _UpperCamelCase )
def __UpperCAmelCase ( self ) -> List[str]:
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
# `trim_offsets`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
UpperCAmelCase_ : Optional[int] = 'hello' # `hello` is a token in the vocabulary of `pretrained_name`
UpperCAmelCase_ : Any = f"{text_of_1_token} {text_of_1_token}"
UpperCAmelCase_ : Dict = self.rust_tokenizer_class.from_pretrained(
_UpperCamelCase , use_fast=_UpperCamelCase , add_prefix_space=_UpperCamelCase , trim_offsets=_UpperCamelCase )
UpperCAmelCase_ : int = tokenizer_r(_UpperCamelCase , return_offsets_mapping=_UpperCamelCase , add_special_tokens=_UpperCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(_UpperCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(_UpperCamelCase ) + 1, len(_UpperCamelCase ) + 1 + len(_UpperCamelCase )) , )
UpperCAmelCase_ : List[Any] = self.rust_tokenizer_class.from_pretrained(
_UpperCamelCase , use_fast=_UpperCamelCase , add_prefix_space=_UpperCamelCase , trim_offsets=_UpperCamelCase )
UpperCAmelCase_ : Any = tokenizer_r(_UpperCamelCase , return_offsets_mapping=_UpperCamelCase , add_special_tokens=_UpperCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(_UpperCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(_UpperCamelCase ) + 1, len(_UpperCamelCase ) + 1 + len(_UpperCamelCase )) , )
UpperCAmelCase_ : Tuple = self.rust_tokenizer_class.from_pretrained(
_UpperCamelCase , use_fast=_UpperCamelCase , add_prefix_space=_UpperCamelCase , trim_offsets=_UpperCamelCase )
UpperCAmelCase_ : str = tokenizer_r(_UpperCamelCase , return_offsets_mapping=_UpperCamelCase , add_special_tokens=_UpperCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(_UpperCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(_UpperCamelCase ), len(_UpperCamelCase ) + 1 + len(_UpperCamelCase )) , )
UpperCAmelCase_ : int = self.rust_tokenizer_class.from_pretrained(
_UpperCamelCase , use_fast=_UpperCamelCase , add_prefix_space=_UpperCamelCase , trim_offsets=_UpperCamelCase )
UpperCAmelCase_ : Optional[int] = tokenizer_r(_UpperCamelCase , return_offsets_mapping=_UpperCamelCase , add_special_tokens=_UpperCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(_UpperCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(_UpperCamelCase ), len(_UpperCamelCase ) + 1 + len(_UpperCamelCase )) , )
UpperCAmelCase_ : List[Any] = f" {text}"
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
UpperCAmelCase_ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(
_UpperCamelCase , use_fast=_UpperCamelCase , add_prefix_space=_UpperCamelCase , trim_offsets=_UpperCamelCase )
UpperCAmelCase_ : Optional[Any] = tokenizer_r(_UpperCamelCase , return_offsets_mapping=_UpperCamelCase , add_special_tokens=_UpperCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(_UpperCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(_UpperCamelCase ) + 1, 1 + len(_UpperCamelCase ) + 1 + len(_UpperCamelCase )) , )
UpperCAmelCase_ : Tuple = self.rust_tokenizer_class.from_pretrained(
_UpperCamelCase , use_fast=_UpperCamelCase , add_prefix_space=_UpperCamelCase , trim_offsets=_UpperCamelCase )
UpperCAmelCase_ : Dict = tokenizer_r(_UpperCamelCase , return_offsets_mapping=_UpperCamelCase , add_special_tokens=_UpperCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(_UpperCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(_UpperCamelCase ), 1 + len(_UpperCamelCase ) + 1 + len(_UpperCamelCase )) , )
UpperCAmelCase_ : Any = self.rust_tokenizer_class.from_pretrained(
_UpperCamelCase , use_fast=_UpperCamelCase , add_prefix_space=_UpperCamelCase , trim_offsets=_UpperCamelCase )
UpperCAmelCase_ : Dict = tokenizer_r(_UpperCamelCase , return_offsets_mapping=_UpperCamelCase , add_special_tokens=_UpperCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(_UpperCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(_UpperCamelCase ), 1 + len(_UpperCamelCase ) + 1 + len(_UpperCamelCase )) , )
| 29 |
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def parse_flag_from_env(key, default=False):
    '''Parse a boolean flag from the environment variable `key`, falling back to `default`.'''
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(F"If set, {key} must be yes or no.")
    return _value
_run_slow_tests = parse_flag_from_env('RUN_SLOW', default=False)
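# Usage note (added for clarity, not in the original module): slow tests are gated from
# the environment, e.g.
#     RUN_SLOW=yes python -m pytest tests/
# `parse_flag_from_env` accepts any value `distutils.util.strtobool` understands
# ('yes'/'no', 'true'/'false', '1'/'0', 'on'/'off', ...).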
def skip(test_case):
    return unittest.skip('Test was skipped' )(test_case)
def slow(test_case):
    return unittest.skipUnless(_run_slow_tests , 'test is slow' )(test_case)
def require_cpu(test_case):
    return unittest.skipUnless(not torch.cuda.is_available() , 'test requires only a CPU' )(test_case)
def require_cuda(test_case):
    return unittest.skipUnless(torch.cuda.is_available() , 'test requires a GPU' )(test_case)
def require_xpu(test_case):
    return unittest.skipUnless(is_xpu_available() , 'test requires a XPU' )(test_case)
def require_mps(test_case):
    return unittest.skipUnless(is_mps_available() , 'test requires a `mps` backend support in `torch`' )(test_case)
def require_huggingface_suite(test_case):
    return unittest.skipUnless(
        is_transformers_available() and is_datasets_available() , 'test requires the Hugging Face suite' )(test_case)
def require_bnb(test_case):
    return unittest.skipUnless(is_bnb_available() , 'test requires the bitsandbytes library' )(test_case)
def require_tpu(test_case):
    return unittest.skipUnless(is_tpu_available() , 'test requires TPU' )(test_case)
def require_single_gpu(test_case):
    return unittest.skipUnless(torch.cuda.device_count() == 1 , 'test requires a GPU' )(test_case)
def require_single_xpu(test_case):
    return unittest.skipUnless(torch.xpu.device_count() == 1 , 'test requires a XPU' )(test_case)
def require_multi_gpu(test_case):
    return unittest.skipUnless(torch.cuda.device_count() > 1 , 'test requires multiple GPUs' )(test_case)
def require_multi_xpu(test_case):
    return unittest.skipUnless(torch.xpu.device_count() > 1 , 'test requires multiple XPUs' )(test_case)
def require_safetensors(test_case):
    return unittest.skipUnless(is_safetensors_available() , 'test requires safetensors' )(test_case)
def require_deepspeed(test_case):
    return unittest.skipUnless(is_deepspeed_available() , 'test requires DeepSpeed' )(test_case)
def require_fsdp(test_case):
    return unittest.skipUnless(is_torch_version('>=' , '1.12.0' ) , 'test requires torch version >= 1.12.0' )(test_case)
def require_torch_min_version(test_case=None , version=None):
    if test_case is None:
        return partial(require_torch_min_version , version=version )
    return unittest.skipUnless(is_torch_version('>=' , version ) , F"test requires torch version >= {version}" )(test_case)
def require_tensorboard(test_case):
    return unittest.skipUnless(is_tensorboard_available() , 'test requires Tensorboard' )(test_case)
def require_wandb(test_case):
    return unittest.skipUnless(is_wandb_available() , 'test requires wandb' )(test_case)
def require_comet_ml(test_case):
    return unittest.skipUnless(is_comet_ml_available() , 'test requires comet_ml' )(test_case)
_atleast_one_tracker_available = (
    any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)
def require_trackers(test_case):
    return unittest.skipUnless(
        _atleast_one_tracker_available , 'test requires at least one tracker to be available and for `comet_ml` to not be installed' , )(test_case)
class TempDirTestCase(unittest.TestCase ):
    '''A TestCase that keeps one temporary directory for the whole class and wipes it between tests.'''
    clear_on_setup = True
    @classmethod
    def setUpClass(cls) -> None:
        cls.tmpdir = tempfile.mkdtemp()
    @classmethod
    def tearDownClass(cls) -> None:
        if os.path.exists(cls.tmpdir ):
            shutil.rmtree(cls.tmpdir )
    def setUp(self) -> None:
        if self.clear_on_setup:
            for path in Path(self.tmpdir ).glob('**/*' ):
                if path.is_file():
                    path.unlink()
                elif path.is_dir():
                    shutil.rmtree(path )
class AccelerateTestCase(unittest.TestCase ):
    '''A TestCase that resets the accelerator state singletons between tests.'''
    def tearDown(self) -> None:
        super().tearDown()
        # Reset the state of the AcceleratorState singleton.
        AcceleratorState._reset_state()
        PartialState._reset_state()
class MockingTestCase(unittest.TestCase ):
    '''A TestCase whose registered mocks are started for each test and stopped on cleanup.'''
    def add_mocks(self , mocks) -> None:
        self.mocks = mocks if isinstance(mocks , (tuple, list) ) else [mocks]
        for m in self.mocks:
            m.start()
            self.addCleanup(m.stop )
def are_the_same_tensors(tensor):
    '''Checks that `tensor` holds the same values on every process.'''
    state = AcceleratorState()
    tensor = tensor[None].clone().to(state.device )
    tensors = gather(tensor ).cpu()
    tensor = tensor[0].cpu()
    for i in range(tensors.shape[0] ):
        if not torch.equal(tensors[i] , tensor ):
            return False
    return True
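# Usage sketch (assumption: run under an initialized distributed state, e.g. via
# `accelerate launch`; `gather` stacks each process's copy along dim 0, so the loop
# above checks every row against the local tensor):
#     assert are_the_same_tensors(torch.tensor([1.0, 2.0, 3.0]))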
class _RunOutput:
    def __init__(self , returncode , stdout , stderr) -> None:
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def _read_stream(stream , callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line )
        else:
            break
async def _stream_subprocess(cmd , env=None , stdin=None , timeout=None , quiet=False , echo=False) -> _RunOutput:
    if echo:
        print('\nRunning: ' , ' '.join(cmd ) )
    p = await asyncio.create_subprocess_exec(
        cmd[0] , *cmd[1:] , stdin=stdin , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=env , )
    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)
    out = []
    err = []
    def tee(line , sink , pipe , label="" ):
        line = line.decode('utf-8' ).rstrip()
        sink.append(line )
        if not quiet:
            print(label , line , file=pipe )
    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout , lambda line : tee(line , out , sys.stdout , label='stdout:' ) ) ),
            asyncio.create_task(_read_stream(p.stderr , lambda line : tee(line , err , sys.stderr , label='stderr:' ) ) ),
        ] , timeout=timeout , )
    return _RunOutput(await p.wait() , out , err )
def execute_subprocess_async(cmd , env=None , stdin=None , timeout=180 , quiet=False , echo=True) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd , env=env , stdin=stdin , timeout=timeout , quiet=quiet , echo=echo ) )
    cmd_str = ' '.join(cmd )
    if result.returncode > 0:
        stderr = '\n'.join(result.stderr )
        raise RuntimeError(
            F"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            F"The combined stderr from workers follows:\n{stderr}" )
    return result
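# Usage sketch (illustrative; the state-checkpointing script further below launches
# itself this way, and the script path here is a placeholder):
#     execute_subprocess_async(
#         ['torchrun', '--nproc_per_node=2', 'test_script.py'], env=os.environ.copy())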
class SubprocessCallException(Exception ):
    pass
def run_command(command , return_stdout=False):
    '''Runs `command` with subprocess, optionally returning decoded stdout; wraps failures.'''
    try:
        output = subprocess.check_output(command , stderr=subprocess.STDOUT )
        if return_stdout:
            if hasattr(output , 'decode' ):
                output = output.decode('utf-8' )
            return output
    except subprocess.CalledProcessError as e:
        raise SubprocessCallException(
            F"Command `{' '.join(command )}` failed with the following error:\n\n{e.output.decode()}" ) from e
| 29 | 1 |
from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class FilterType(Protocol ):
    def process(self , sample: float) -> float:
        '''Calculate y[n] for a given x[n].'''
        return 0.0
def get_bounds(fft_results: np.ndarray , samplerate: int):
    '''Get y-axis bounds for plotting fft results.'''
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1] )] )
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1] )] )
    return lowest, highest
def show_frequency_response(filter_type: FilterType , samplerate: int) -> None:
    '''Show the gain (dB) of `filter_type` against frequency on a log axis.'''
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item ) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs ) )
    fft_db = 20 * np.log10(fft_out )
    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24 , samplerate / 2 - 1 )
    plt.xlabel('Frequency (Hz)' )
    plt.xscale('log' )
    # Display within reasonable bounds
    bounds = get_bounds(fft_db , samplerate )
    plt.ylim(max([-80, bounds[0]] ) , min([80, bounds[1]] ) )
    plt.ylabel('Gain (dB)' )
    plt.plot(fft_db )
    plt.show()
def show_phase_response(filter_type: FilterType , samplerate: int) -> None:
    '''Show the phase shift (radians) of `filter_type` against frequency on a log axis.'''
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item ) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.angle(np.fft.fft(outputs ) )
    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24 , samplerate / 2 - 1 )
    plt.xlabel('Frequency (Hz)' )
    plt.xscale('log' )
    plt.ylim(-2 * pi , 2 * pi )
    plt.ylabel('Phase shift (Radians)' )
    plt.plot(np.unwrap(fft_out , -2 * pi ) )
    plt.show()
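# Minimal usage sketch (not part of the original module): `FilterType` only requires a
# `process(sample)` method, so a trivial pass-through "filter" is enough to exercise the
# plots; the 48000 Hz samplerate is an arbitrary choice for the example.
#
#     class Passthrough:
#         def process(self, sample: float) -> float:
#             return sample
#
#     show_frequency_response(Passthrough(), 48000)  # flat 0 dB magnitude response
#     show_phase_response(Passthrough(), 48000)      # zero phase shift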
| 29 |
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
__UpperCAmelCase = logging.getLogger(__name__)
def dummy_dataloaders(a=2 , b=3 , batch_size=16 , n_train_batches: int = 10 , n_valid_batches: int = 2):
    '''Generates a tuple of dummy DataLoaders to test with.'''
    def get_dataset(n_batches ):
        x = torch.randn(batch_size * n_batches , 1 )
        return TensorDataset(x , a * x + b + 0.1 * torch.randn(batch_size * n_batches , 1 ) )
    train_dataset = get_dataset(n_train_batches )
    valid_dataset = get_dataset(n_valid_batches )
    train_dataloader = DataLoader(train_dataset , shuffle=True , batch_size=batch_size , num_workers=4 )
    valid_dataloader = DataLoader(valid_dataset , shuffle=False , batch_size=batch_size , num_workers=4 )
    return (train_dataloader, valid_dataloader)
def train(num_epochs , model , dataloader , optimizer , accelerator , scheduler=None):
    '''Trains `model` for `num_epochs` and returns the random numbers drawn along the way.'''
    rands = []
    for epoch in range(num_epochs ):
        # Train quickly
        model.train()
        for batch in dataloader:
            x , y = batch
            outputs = model(x )
            loss = torch.nn.functional.mse_loss(outputs , y )
            accelerator.backward(loss )
            optimizer.step()
            optimizer.zero_grad()
        rands.append(random.random() ) # Introduce some randomness
        if scheduler is not None:
            scheduler.step()
    return rands
class DummyModel(nn.Module ):
    '''Simple model to do y = a*x + b.'''
    def __init__(self) -> None:
        super().__init__()
        self.a = nn.Parameter(torch.randn(1 ) )
        self.b = nn.Parameter(torch.randn(1 ) )
    def forward(self , x):
        return x * self.a + self.b
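# Note (added for clarity): with `dummy_dataloaders()` defaults the targets are generated
# as y = 2*x + 3 plus 0.1-scale noise, so a converged DummyModel should end up with
# `a` near 2 and `b` near 3. The checkpoint tests below only rely on exact state
# round-trips, though, not on convergence.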
class lowerCamelCase (unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self ) -> Dict:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
UpperCAmelCase_ : Tuple = DummyModel()
UpperCAmelCase_ : List[str] = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = dummy_dataloaders()
UpperCAmelCase_ : Optional[int] = ProjectConfiguration(total_limit=1 , project_dir=_UpperCamelCase , automatic_checkpoint_naming=_UpperCamelCase )
# Train baseline
UpperCAmelCase_ : Dict = Accelerator(project_config=_UpperCamelCase )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : str = accelerator.prepare(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# Save initial
accelerator.save_state()
# Save second state
accelerator.save_state()
self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 )
def __UpperCAmelCase ( self ) -> int:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
UpperCAmelCase_ : Optional[Any] = DummyModel()
UpperCAmelCase_ : str = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = dummy_dataloaders()
# Train baseline
UpperCAmelCase_ : Tuple = Accelerator()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = accelerator.prepare(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# Save initial
UpperCAmelCase_ : Any = os.path.join(_UpperCamelCase , 'initial' )
accelerator.save_state(_UpperCamelCase )
((UpperCAmelCase_) , (UpperCAmelCase_)) : Optional[int] = model.a.item(), model.b.item()
UpperCAmelCase_ : Dict = optimizer.state_dict()
UpperCAmelCase_ : Union[str, Any] = train(3 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
((UpperCAmelCase_) , (UpperCAmelCase_)) : Union[str, Any] = model.a.item(), model.b.item()
UpperCAmelCase_ : Any = optimizer.state_dict()
# Train partially
set_seed(4_2 )
UpperCAmelCase_ : int = DummyModel()
UpperCAmelCase_ : int = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
UpperCAmelCase_ , UpperCAmelCase_ : str = dummy_dataloaders()
UpperCAmelCase_ : Optional[Any] = Accelerator()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Tuple = accelerator.prepare(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
accelerator.load_state(_UpperCamelCase )
((UpperCAmelCase_) , (UpperCAmelCase_)) : List[str] = model.a.item(), model.b.item()
UpperCAmelCase_ : Optional[Any] = optimizer.state_dict()
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
UpperCAmelCase_ : Dict = train(2 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# Save everything
UpperCAmelCase_ : Union[str, Any] = os.path.join(_UpperCamelCase , 'checkpoint' )
accelerator.save_state(_UpperCamelCase )
# Load everything back in and make sure all states work
accelerator.load_state(_UpperCamelCase )
test_rands += train(1 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
((UpperCAmelCase_) , (UpperCAmelCase_)) : Optional[Any] = model.a.item(), model.b.item()
UpperCAmelCase_ : Union[str, Any] = optimizer.state_dict()
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
def __UpperCAmelCase ( self ) -> int:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
UpperCAmelCase_ : Tuple = DummyModel()
UpperCAmelCase_ : Optional[int] = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = dummy_dataloaders()
UpperCAmelCase_ : Any = ProjectConfiguration(automatic_checkpoint_naming=_UpperCamelCase )
# Train baseline
UpperCAmelCase_ : str = Accelerator(project_dir=_UpperCamelCase , project_config=_UpperCamelCase )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Any = accelerator.prepare(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# Save initial
accelerator.save_state()
((UpperCAmelCase_) , (UpperCAmelCase_)) : Optional[int] = model.a.item(), model.b.item()
UpperCAmelCase_ : Optional[int] = optimizer.state_dict()
UpperCAmelCase_ : Optional[Any] = train(3 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
((UpperCAmelCase_) , (UpperCAmelCase_)) : Tuple = model.a.item(), model.b.item()
UpperCAmelCase_ : Optional[int] = optimizer.state_dict()
# Train partially
set_seed(4_2 )
UpperCAmelCase_ : Any = DummyModel()
UpperCAmelCase_ : Any = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = dummy_dataloaders()
UpperCAmelCase_ : Tuple = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=_UpperCamelCase )
UpperCAmelCase_ : List[Any] = Accelerator(project_dir=_UpperCamelCase , project_config=_UpperCamelCase )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Any = accelerator.prepare(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
accelerator.load_state(os.path.join(_UpperCamelCase , 'checkpoints' , 'checkpoint_0' ) )
((UpperCAmelCase_) , (UpperCAmelCase_)) : str = model.a.item(), model.b.item()
UpperCAmelCase_ : List[Any] = optimizer.state_dict()
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
UpperCAmelCase_ : Union[str, Any] = train(2 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# Save everything
accelerator.save_state()
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(_UpperCamelCase , 'checkpoints' , 'checkpoint_1' ) )
test_rands += train(1 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
((UpperCAmelCase_) , (UpperCAmelCase_)) : List[Any] = model.a.item(), model.b.item()
UpperCAmelCase_ : Dict = optimizer.state_dict()
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
def __UpperCAmelCase ( self ) -> Dict:
UpperCAmelCase_ : Optional[Any] = torch.tensor([1, 2, 3] )
UpperCAmelCase_ : Any = torch.tensor([2, 3, 4] )
UpperCAmelCase_ : Union[str, Any] = DummyModel()
UpperCAmelCase_ : List[str] = torch.optim.Adam(net.parameters() )
UpperCAmelCase_ : Any = Accelerator()
with self.assertRaises(_UpperCamelCase ) as ve:
accelerator.register_for_checkpointing(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
UpperCAmelCase_ : Optional[int] = str(ve.exception )
self.assertTrue('Item at index 0' in message )
self.assertTrue('Item at index 1' in message )
self.assertFalse('Item at index 2' in message )
self.assertFalse('Item at index 3' in message )
def __UpperCAmelCase ( self ) -> int:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
UpperCAmelCase_ : int = DummyModel()
UpperCAmelCase_ : Any = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
UpperCAmelCase_ : Dict = torch.optim.lr_scheduler.StepLR(_UpperCamelCase , step_size=1 , gamma=0.99 )
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = dummy_dataloaders()
UpperCAmelCase_ : Tuple = ProjectConfiguration(automatic_checkpoint_naming=_UpperCamelCase )
# Train baseline
UpperCAmelCase_ : Tuple = Accelerator(project_dir=_UpperCamelCase , project_config=_UpperCamelCase )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Any = accelerator.prepare(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# Save initial
accelerator.save_state()
UpperCAmelCase_ : Dict = scheduler.state_dict()
train(3 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
self.assertNotEqual(_UpperCamelCase , scheduler.state_dict() )
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(_UpperCamelCase , 'checkpoints' , 'checkpoint_0' ) )
self.assertEqual(_UpperCamelCase , scheduler.state_dict() )
def __UpperCAmelCase ( self ) -> Dict:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
UpperCAmelCase_ : Optional[int] = DummyModel()
UpperCAmelCase_ : Dict = ProjectConfiguration(automatic_checkpoint_naming=_UpperCamelCase , total_limit=2 )
# Train baseline
UpperCAmelCase_ : Optional[int] = Accelerator(project_dir=_UpperCamelCase , project_config=_UpperCamelCase )
UpperCAmelCase_ : str = accelerator.prepare(_UpperCamelCase )
# Save 3 states:
for _ in range(1_1 ):
accelerator.save_state()
self.assertTrue(not os.path.exists(os.path.join(_UpperCamelCase , 'checkpoints' , 'checkpoint_0' ) ) )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase , 'checkpoints' , 'checkpoint_9' ) ) )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase , 'checkpoints' , 'checkpoint_10' ) ) )
@require_cuda
def __UpperCAmelCase ( self ) -> str:
UpperCAmelCase_ : List[str] = ['torchrun', f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__ )]
execute_subprocess_async(_UpperCamelCase , env=os.environ.copy() )
if __name__ == "__main__":
    savedir = '/tmp/accelerate/state_checkpointing'
    model = DummyModel()
    optimizer = torch.optim.Adam(params=model.parameters(), lr=1E-3)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
    train_dataloader, valid_dataloader = dummy_dataloaders()
    project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
    # Train baseline
    accelerator = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision='no')
    if accelerator.process_index == 0:
        if os.path.exists(savedir):
            shutil.rmtree(savedir)
        os.makedirs(savedir)
    model, optimizer, train_dataloader, valid_dataloader, scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, valid_dataloader, scheduler
    )
    model, optimizer = accelerator.prepare(model, optimizer)
    train(3, model, train_dataloader, optimizer, accelerator, scheduler)
    # Check that the initial optimizer is loaded on the GPU
    for group in optimizer.param_groups:
        param_device = group['params'][0].device
        break
    assert param_device.type == accelerator.device.type
    model = model.cpu()
    accelerator.wait_for_everyone()
    accelerator.save_state()
    accelerator.wait_for_everyone()
    # Check CPU state
    accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='cpu')
    for group in optimizer.param_groups:
        param_device = group['params'][0].device
        break
    assert (
        param_device.type == torch.device('cpu').type
    ), F"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
    # Check device state
    model.to(accelerator.device)
    accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='on_device')
    for group in optimizer.param_groups:
        param_device = group['params'][0].device
        break
    assert (
        param_device.type == accelerator.device.type
    ), F"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
    # Check error
    with pytest.raises(TypeError, match='Unsupported optimizer map location passed'):
        accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='invalid')
    accelerator.wait_for_everyone()
    if accelerator.process_index == 0:
        shutil.rmtree(savedir)
    accelerator.wait_for_everyone()
| 29 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {'configuration_vit': ['VIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTConfig', 'ViTOnnxConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_vit'] = ['ViTFeatureExtractor']
    _import_structure['image_processing_vit'] = ['ViTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_vit'] = [
'VIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'ViTForImageClassification',
'ViTForMaskedImageModeling',
'ViTModel',
'ViTPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_vit'] = [
'TFViTForImageClassification',
'TFViTModel',
'TFViTPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_vit'] = [
'FlaxViTForImageClassification',
'FlaxViTModel',
'FlaxViTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
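# Note (added for clarity, not in the original module): replacing the module in
# `sys.modules` with a `_LazyModule` defers the heavy submodule imports declared above
# until an attribute is first accessed, e.g.
#     from transformers.models.vit import ViTConfig  # only loads configuration_vit
# so importing the package stays cheap even when torch/TF/Flax extras are missing.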
| 29 |
import warnings
from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor
logger = logging.get_logger(__name__)
class ImageGPTFeatureExtractor(ImageGPTImageProcessor ):
    def __init__(self , *args , **kwargs) -> None:
        warnings.warn(
            'The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use ImageGPTImageProcessor instead.' , FutureWarning , )
        super().__init__(*args , **kwargs )
| 29 | 1 |
import os
from math import log10
def solution(data_file: str = "base_exp.txt") -> int:
    '''
    Returns the 1-indexed line number of the greatest base^exponent pair in `data_file`,
    comparing exponent * log10(base) instead of computing the huge powers directly.
    '''
    largest: float = 0
    result = 0
    for i, line in enumerate(open(os.path.join(os.path.dirname(__file__ ) , data_file ) ) ):
        a , x = list(map(int , line.split(',' ) ) )
        if x * log10(a ) > largest:
            largest = x * log10(a )
            result = i + 1
    return result
if __name__ == "__main__":
    print(solution())
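# Worked example of the log identity used above (hypothetical numbers, not from the
# puzzle's data file): a^x > b^y iff x*log10(a) > y*log10(b), so
#     2^11 -> 11 * log10(2) ~= 3.311
#     3^7  ->  7 * log10(3) ~= 3.340
# and 3^7 (= 2187) is indeed larger than 2^11 (= 2048) without ever forming the powers.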
| 29 |
def is_palindrome(head):
    if not head:
        return True
    # split the list to two parts
    fast , slow = head.next, head
    while fast and fast.next:
        fast = fast.next.next
        slow = slow.next
    second = slow.next
    slow.next = None # Don't forget here! But forget still works!
    # reverse the second part
    node = None
    while second:
        nxt = second.next
        second.next = node
        node = second
        second = nxt
    # compare two parts
    # second part has the same or one less node
    while node:
        if node.val != head.val:
            return False
        node = node.next
        head = head.next
    return True
def is_palindrome_stack(head):
    if not head or not head.next:
        return True
    # 1. Get the midpoint (slow)
    slow = fast = cur = head
    while fast and fast.next:
        fast , slow = fast.next.next, slow.next
    # 2. Push the second half into the stack
    stack = [slow.val]
    while slow.next:
        slow = slow.next
        stack.append(slow.val )
    # 3. Comparison
    while stack:
        if stack.pop() != cur.val:
            return False
        cur = cur.next
    return True
def is_palindrome_dict(head):
    if not head or not head.next:
        return True
    d = {}
    pos = 0
    while head:
        if head.val in d:
            d[head.val].append(pos )
        else:
            d[head.val] = [pos]
        head = head.next
        pos += 1
    checksum = pos - 1
    middle = 0
    for v in d.values():
        if len(v ) % 2 != 0:
            middle += 1
        else:
            step = 0
            for i in range(0 , len(v ) ):
                if v[i] + v[len(v ) - 1 - step] != checksum:
                    return False
                step += 1
    if middle > 1:
        return False
    return True
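# Minimal harness (added for illustration; the original file assumes an external singly
# linked list node type exposing `.val` and `.next`, so this `ListNode` is an assumption):
class ListNode:
    def __init__(self, val=0, nxt=None):
        self.val = val
        self.next = nxt
def build_list(values):
    '''Builds a singly linked list from a Python list and returns its head (or None).'''
    head = None
    for v in reversed(values):
        head = ListNode(v, head)
    return head
if __name__ == "__main__":
    # All three implementations should agree; rebuild per call since
    # is_palindrome() splits and reverses the list in place.
    for values, expected in [([1, 2, 2, 1], True), ([1, 2], False), ([], True)]:
        assert is_palindrome(build_list(values)) is expected
        assert is_palindrome_stack(build_list(values)) is expected
        assert is_palindrome_dict(build_list(values)) is expected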
| 29 | 1 |
import qiskit
def half_adder(bit0: int , bit1: int) -> qiskit.result.counts.Counts:
    '''Builds and simulates a quantum half adder, returning the measurement counts.'''
    simulator = qiskit.Aer.get_backend('aer_simulator' )
    qc_ha = qiskit.QuantumCircuit(4 , 2 )
    # encode inputs in qubits 0 and 1
    if bit0 == 1:
        qc_ha.x(0 )
    if bit1 == 1:
        qc_ha.x(1 )
    qc_ha.barrier()
    # use cnots to write XOR of the inputs on qubit2
    qc_ha.cx(0 , 2 )
    qc_ha.cx(1 , 2 )
    # use ccx / toffoli gate to write AND of the inputs on qubit3
    qc_ha.ccx(0 , 1 , 3 )
    qc_ha.barrier()
    # extract outputs
    qc_ha.measure(2 , 0 ) # extract XOR value
    qc_ha.measure(3 , 1 ) # extract AND value
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(qc_ha , simulator , shots=1_000 )
    # Return the histogram data of the results of the experiment
    return job.result().get_counts(qc_ha )
if __name__ == "__main__":
    counts = half_adder(1, 1)
    print(F'Half Adder Output Qubit Counts: {counts}')
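# Expected behaviour (deterministic circuit, so all 1000 shots land on one bitstring;
# keys read as classical bits c1 c0 = AND, XOR):
#   half_adder(0, 0) -> {'00': 1000}   sum 0, carry 0
#   half_adder(0, 1) -> {'01': 1000}   sum 1, carry 0
#   half_adder(1, 0) -> {'01': 1000}   sum 1, carry 0
#   half_adder(1, 1) -> {'10': 1000}   sum 0, carry 1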
| 29 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'configuration_vit_msn': ['VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTMSNConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_vit_msn'] = [
'VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST',
'ViTMSNModel',
'ViTMSNForImageClassification',
'ViTMSNPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 29 | 1 |
import gc
import unittest
import numpy as np
import torch
from torch.backends.cuda import sdp_kernel
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
    UNet2DModel,
)
from diffusers.utils import randn_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_2, require_torch_gpu
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class ConsistencyModelPipelineFastTests(PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = ConsistencyModelPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
    # Override required_optional_params to remove num_images_per_prompt
    required_optional_params = frozenset(
        [
            'num_inference_steps',
            'generator',
            'latents',
            'output_type',
            'return_dict',
            'callback',
            'callback_steps',
        ] )
    @property
    def dummy_uncond_unet(self):
        unet = UNet2DModel.from_pretrained(
            'diffusers/consistency-models-test' , subfolder='test_unet' , )
        return unet
    @property
    def dummy_cond_unet(self):
        unet = UNet2DModel.from_pretrained(
            'diffusers/consistency-models-test' , subfolder='test_unet_class_cond' , )
        return unet
    def get_dummy_components(self , class_cond=False):
        if class_cond:
            unet = self.dummy_cond_unet
        else:
            unet = self.dummy_uncond_unet
        # Default to CM multistep sampler
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , )
        components = {
            'unet': unet,
            'scheduler': scheduler,
        }
        return components
    def get_dummy_inputs(self , device , seed=0):
        if str(device ).startswith('mps' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            'batch_size': 1,
            'num_inference_steps': None,
            'timesteps': [22, 0],
            'generator': generator,
            'output_type': 'np',
        }
        return inputs
    def test_consistency_model_pipeline_multistep(self):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = ConsistencyModelPipeline(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        image = pipe(**inputs ).images
        assert image.shape == (1, 32, 32, 3)
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
    def test_consistency_model_pipeline_multistep_class_cond(self):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(class_cond=True )
        pipe = ConsistencyModelPipeline(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        inputs['class_labels'] = 0
        image = pipe(**inputs ).images
        assert image.shape == (1, 32, 32, 3)
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
    def test_consistency_model_pipeline_onestep(self):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = ConsistencyModelPipeline(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        inputs['num_inference_steps'] = 1
        inputs['timesteps'] = None
        image = pipe(**inputs ).images
        assert image.shape == (1, 32, 32, 3)
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
    def test_consistency_model_pipeline_onestep_class_cond(self):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(class_cond=True )
        pipe = ConsistencyModelPipeline(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        inputs['num_inference_steps'] = 1
        inputs['timesteps'] = None
        inputs['class_labels'] = 0
        image = pipe(**inputs ).images
        assert image.shape == (1, 32, 32, 3)
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
@slow
@require_torch_gpu
class ConsistencyModelPipelineSlowTests(unittest.TestCase ):
    def tearDown(self) -> None:
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def get_inputs(self , seed=0 , get_fixed_latents=False , device='cpu' , dtype=torch.float16 , shape=(1, 3, 64, 64)):
        generator = torch.manual_seed(seed )
        inputs = {
            'num_inference_steps': None,
            'timesteps': [22, 0],
            'class_labels': 0,
            'generator': generator,
            'output_type': 'np',
        }
        if get_fixed_latents:
            latents = self.get_fixed_latents(seed=seed , device=device , dtype=dtype , shape=shape )
            inputs['latents'] = latents
        return inputs
    def get_fixed_latents(self , seed=0 , device='cpu' , dtype=torch.float16 , shape=(1, 3, 64, 64)):
        if type(device ) == str:
            device = torch.device(device )
        generator = torch.Generator(device=device ).manual_seed(seed )
        latents = randn_tensor(shape , generator=generator , device=device , dtype=dtype )
        return latents
    def test_consistency_model_cd_multistep(self):
        unet = UNet2DModel.from_pretrained('diffusers/consistency_models' , subfolder='diffusers_cd_imagenet64_l2' )
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , )
        pipe = ConsistencyModelPipeline(unet=unet , scheduler=scheduler )
        pipe.to(torch_device=torch_device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_inputs()
        image = pipe(**inputs ).images
        assert image.shape == (1, 64, 64, 3)
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0888, 0.0881, 0.0666, 0.0479, 0.0292, 0.0195, 0.0201, 0.0163, 0.0254] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
    def test_consistency_model_cd_onestep(self):
        unet = UNet2DModel.from_pretrained('diffusers/consistency_models' , subfolder='diffusers_cd_imagenet64_l2' )
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , )
        pipe = ConsistencyModelPipeline(unet=unet , scheduler=scheduler )
        pipe.to(torch_device=torch_device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_inputs()
        inputs['num_inference_steps'] = 1
        inputs['timesteps'] = None
        image = pipe(**inputs ).images
        assert image.shape == (1, 64, 64, 3)
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0340, 0.0152, 0.0063, 0.0267, 0.0221, 0.0107, 0.0416, 0.0186, 0.0217] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
    @require_torch_2
    def test_consistency_model_cd_multistep_flash_attn(self):
        unet = UNet2DModel.from_pretrained('diffusers/consistency_models' , subfolder='diffusers_cd_imagenet64_l2' )
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , )
        pipe = ConsistencyModelPipeline(unet=unet , scheduler=scheduler )
        pipe.to(torch_device=torch_device , torch_dtype=torch.float16 )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_inputs(get_fixed_latents=True , device=torch_device )
        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=True , enable_math=False , enable_mem_efficient=False ):
            image = pipe(**inputs ).images
        assert image.shape == (1, 64, 64, 3)
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.1875, 0.1428, 0.1289, 0.2151, 0.2092, 0.1477, 0.1877, 0.1641, 0.1353] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
    @require_torch_2
    def test_consistency_model_cd_onestep_flash_attn(self):
        unet = UNet2DModel.from_pretrained('diffusers/consistency_models' , subfolder='diffusers_cd_imagenet64_l2' )
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , )
        pipe = ConsistencyModelPipeline(unet=unet , scheduler=scheduler )
        pipe.to(torch_device=torch_device , torch_dtype=torch.float16 )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_inputs(get_fixed_latents=True , device=torch_device )
        inputs['num_inference_steps'] = 1
        inputs['timesteps'] = None
        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=True , enable_math=False , enable_mem_efficient=False ):
            image = pipe(**inputs ).images
        assert image.shape == (1, 64, 64, 3)
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.1663, 0.1948, 0.2275, 0.1680, 0.1204, 0.1245, 0.1858, 0.1338, 0.2095] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
| 29 |
deps = {
'Pillow': 'Pillow<10.0.0',
'accelerate': 'accelerate>=0.20.3',
'av': 'av==9.2.0',
'beautifulsoup4': 'beautifulsoup4',
'black': 'black~=23.1',
'codecarbon': 'codecarbon==1.2.0',
'cookiecutter': 'cookiecutter==1.7.3',
'dataclasses': 'dataclasses',
'datasets': 'datasets!=2.5.0',
'decord': 'decord==0.6.0',
'deepspeed': 'deepspeed>=0.9.3',
'diffusers': 'diffusers',
'dill': 'dill<0.3.5',
'evaluate': 'evaluate>=0.2.0',
'fairscale': 'fairscale>0.3',
'faiss-cpu': 'faiss-cpu',
'fastapi': 'fastapi',
'filelock': 'filelock',
'flax': 'flax>=0.4.1,<=0.7.0',
'ftfy': 'ftfy',
'fugashi': 'fugashi>=1.0',
'GitPython': 'GitPython<3.1.19',
'hf-doc-builder': 'hf-doc-builder>=0.3.0',
'huggingface-hub': 'huggingface-hub>=0.14.1,<1.0',
'importlib_metadata': 'importlib_metadata',
'ipadic': 'ipadic>=1.0.0,<2.0',
'isort': 'isort>=5.5.4',
'jax': 'jax>=0.2.8,!=0.3.2,<=0.4.13',
'jaxlib': 'jaxlib>=0.1.65,<=0.4.13',
'jieba': 'jieba',
'kenlm': 'kenlm',
'keras-nlp': 'keras-nlp>=0.3.1',
'librosa': 'librosa',
'nltk': 'nltk',
'natten': 'natten>=0.14.6',
'numpy': 'numpy>=1.17',
'onnxconverter-common': 'onnxconverter-common',
'onnxruntime-tools': 'onnxruntime-tools>=1.4.2',
'onnxruntime': 'onnxruntime>=1.4.0',
'opencv-python': 'opencv-python',
'optuna': 'optuna',
'optax': 'optax>=0.0.8,<=0.1.4',
'packaging': 'packaging>=20.0',
'parameterized': 'parameterized',
'phonemizer': 'phonemizer',
'protobuf': 'protobuf',
'psutil': 'psutil',
'pyyaml': 'pyyaml>=5.1',
'pydantic': 'pydantic<2',
'pytest': 'pytest>=7.2.0',
'pytest-timeout': 'pytest-timeout',
'pytest-xdist': 'pytest-xdist',
'python': 'python>=3.8.0',
'ray[tune]': 'ray[tune]',
'regex': 'regex!=2019.12.17',
'requests': 'requests',
'rhoknp': 'rhoknp>=1.1.0,<1.3.1',
'rjieba': 'rjieba',
'rouge-score': 'rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1',
'ruff': 'ruff>=0.0.241,<=0.0.259',
'sacrebleu': 'sacrebleu>=1.4.12,<2.0.0',
'sacremoses': 'sacremoses',
'safetensors': 'safetensors>=0.3.1',
'sagemaker': 'sagemaker>=2.31.0',
'scikit-learn': 'scikit-learn',
'sentencepiece': 'sentencepiece>=0.1.91,!=0.1.92',
'sigopt': 'sigopt',
'starlette': 'starlette',
'sudachipy': 'sudachipy>=0.6.6',
'sudachidict_core': 'sudachidict_core>=20220729',
'tensorflow-cpu': 'tensorflow-cpu>=2.6,<2.14',
'tensorflow': 'tensorflow>=2.6,<2.14',
'tensorflow-text': 'tensorflow-text<2.14',
'tf2onnx': 'tf2onnx',
'timeout-decorator': 'timeout-decorator',
'timm': 'timm',
'tokenizers': 'tokenizers>=0.11.1,!=0.11.3,<0.14',
'torch': 'torch>=1.9,!=1.12.0',
'torchaudio': 'torchaudio',
'torchvision': 'torchvision',
'pyctcdecode': 'pyctcdecode>=0.4.0',
'tqdm': 'tqdm>=4.27',
'unidic': 'unidic>=1.0.2',
'unidic_lite': 'unidic_lite>=1.0.7',
'urllib3': 'urllib3<2.0.0',
'uvicorn': 'uvicorn',
}
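# Lookup example (added for illustration): the table maps a bare package name to its
# pinned requirement string, ready to be handed to setuptools, e.g.
#     deps['torch']      -> 'torch>=1.9,!=1.12.0'
#     deps['tokenizers'] -> 'tokenizers>=0.11.1,!=0.11.3,<0.14'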
| 29 | 1 |
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def lowercase__ ( __snake_case : str , __snake_case : str , __snake_case : str , __snake_case : PreTrainedTokenizer , __snake_case : int , __snake_case : Optional[int] = None , ):
'''simple docstring'''
UpperCAmelCase_ : Any = {}
if train_file is not None:
UpperCAmelCase_ : Any = [train_file]
if eval_file is not None:
UpperCAmelCase_ : int = [eval_file]
if test_file is not None:
UpperCAmelCase_ : str = [test_file]
UpperCAmelCase_ : int = datasets.load_dataset('csv' , data_files=__snake_case )
UpperCAmelCase_ : List[Any] = list(ds[list(files.keys() )[0]].features.keys() )
UpperCAmelCase_ : List[str] = features_name.pop(__snake_case )
UpperCAmelCase_ : Any = list(set(ds[list(files.keys() )[0]][label_name] ) )
UpperCAmelCase_ : List[str] = {label: i for i, label in enumerate(__snake_case )}
UpperCAmelCase_ : Dict = tokenizer.model_input_names
UpperCAmelCase_ : Optional[int] = {}
if len(__snake_case ) == 1:
for k in files.keys():
UpperCAmelCase_ : List[Any] = ds[k].map(
lambda __snake_case : tokenizer.batch_encode_plus(
example[features_name[0]] , truncation=__snake_case , max_length=__snake_case , padding='max_length' ) , batched=__snake_case , )
elif len(__snake_case ) == 2:
for k in files.keys():
UpperCAmelCase_ : Union[str, Any] = ds[k].map(
lambda __snake_case : tokenizer.batch_encode_plus(
(example[features_name[0]], example[features_name[1]]) , truncation=__snake_case , max_length=__snake_case , padding='max_length' , ) , batched=__snake_case , )
def gen_train():
for ex in transformed_ds[datasets.Split.TRAIN]:
UpperCAmelCase_ : Tuple = {k: v for k, v in ex.items() if k in input_names}
UpperCAmelCase_ : str = labelaid[ex[label_name]]
yield (d, label)
def gen_val():
for ex in transformed_ds[datasets.Split.VALIDATION]:
UpperCAmelCase_ : Any = {k: v for k, v in ex.items() if k in input_names}
UpperCAmelCase_ : List[str] = labelaid[ex[label_name]]
yield (d, label)
def gen_test():
for ex in transformed_ds[datasets.Split.TEST]:
UpperCAmelCase_ : Any = {k: v for k, v in ex.items() if k in input_names}
UpperCAmelCase_ : Dict = labelaid[ex[label_name]]
yield (d, label)
UpperCAmelCase_ : str = (
tf.data.Dataset.from_generator(
            __snake_case , ({k: tf.int32 for k in input_names}, tf.int64) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TRAIN in transformed_ds
else None
)
if train_ds is not None:
UpperCAmelCase_ : Tuple = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) )
UpperCAmelCase_ : str = (
tf.data.Dataset.from_generator(
            __snake_case , ({k: tf.int32 for k in input_names}, tf.int64) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.VALIDATION in transformed_ds
else None
)
if val_ds is not None:
UpperCAmelCase_ : int = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) )
UpperCAmelCase_ : List[str] = (
tf.data.Dataset.from_generator(
            __snake_case , ({k: tf.int32 for k in input_names}, tf.int64) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TEST in transformed_ds
else None
)
if test_ds is not None:
UpperCAmelCase_ : List[Any] = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) )
return train_ds, val_ds, test_ds, labelaid
__UpperCAmelCase = logging.getLogger(__name__)
@dataclass
class lowerCamelCase :
'''simple docstring'''
_snake_case : int = field(metadata={'''help''': '''Which column contains the label'''} )
_snake_case : str = field(default=_snake_case , metadata={'''help''': '''The path of the training file'''} )
_snake_case : Optional[str] = field(default=_snake_case , metadata={'''help''': '''The path of the development file'''} )
_snake_case : Optional[str] = field(default=_snake_case , metadata={'''help''': '''The path of the test file'''} )
_snake_case : int = field(
default=1_2_8 , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
_snake_case : bool = field(
default=_snake_case , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
@dataclass
class lowerCamelCase :
'''simple docstring'''
_snake_case : str = field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
_snake_case : Optional[str] = field(
default=_snake_case , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
_snake_case : Optional[str] = field(
default=_snake_case , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
_snake_case : bool = field(default=_snake_case , metadata={'''help''': '''Set this flag to use fast tokenization.'''} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
_snake_case : Optional[str] = field(
default=_snake_case , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
def lowercase__ ( ):
'''simple docstring'''
UpperCAmelCase_ : Dict = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[str] = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
' --overwrite_output_dir to overcome.' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO , )
logger.info(
F"n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, "
F"16-bits training: {training_args.fpaa}" )
logger.info(F"Training/evaluation parameters {training_args}" )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
UpperCAmelCase_ : List[str] = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Dict = get_tfds(
train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=__snake_case , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , )
UpperCAmelCase_ : Any = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(__snake_case ) , label2id=__snake_case , id2label={id: label for label, id in labelaid.items()} , finetuning_task='text-classification' , cache_dir=model_args.cache_dir , )
with training_args.strategy.scope():
UpperCAmelCase_ : Dict = TFAutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_pt=bool('.bin' in model_args.model_name_or_path ) , config=__snake_case , cache_dir=model_args.cache_dir , )
def compute_metrics(__snake_case : EvalPrediction ) -> Dict:
UpperCAmelCase_ : Tuple = np.argmax(p.predictions , axis=1 )
return {"acc": (preds == p.label_ids).mean()}
# Initialize our Trainer
UpperCAmelCase_ : List[str] = TFTrainer(
model=__snake_case , args=__snake_case , train_dataset=__snake_case , eval_dataset=__snake_case , compute_metrics=__snake_case , )
# Training
if training_args.do_train:
trainer.train()
trainer.save_model()
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
UpperCAmelCase_ : List[Any] = {}
if training_args.do_eval:
logger.info('*** Evaluate ***' )
UpperCAmelCase_ : Dict = trainer.evaluate()
UpperCAmelCase_ : Dict = os.path.join(training_args.output_dir , 'eval_results.txt' )
with open(__snake_case , 'w' ) as writer:
logger.info('***** Eval results *****' )
for key, value in result.items():
logger.info(F" {key} = {value}" )
writer.write(F"{key} = {value}\n" )
results.update(__snake_case )
return results
if __name__ == "__main__":
main()
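# Hypothetical invocation (not from the original file): HfArgumentParser derives
# CLI flags from the dataclass fields above, so a run could look like
#   python run_tf_text_classification.py \
#     --model_name_or_path bert-base-cased \
#     --label_column_id 0 \
#     --train_file train.csv --dev_file dev.csv --test_file test.csv \
#     --output_dir ./out --do_train --do_eval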
| 29 |
from dataclasses import dataclass
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .attention_processor import AttentionProcessor, AttnProcessor
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder
@dataclass
class lowerCamelCase (_snake_case ):
'''simple docstring'''
_snake_case : "DiagonalGaussianDistribution"
class lowerCamelCase (_snake_case , _snake_case ):
'''simple docstring'''
_snake_case : Optional[int] = True
@register_to_config
def __init__( self , _UpperCamelCase = 3 , _UpperCamelCase = 3 , _UpperCamelCase = ("DownEncoderBlock2D",) , _UpperCamelCase = ("UpDecoderBlock2D",) , _UpperCamelCase = (6_4,) , _UpperCamelCase = 1 , _UpperCamelCase = "silu" , _UpperCamelCase = 4 , _UpperCamelCase = 3_2 , _UpperCamelCase = 3_2 , _UpperCamelCase = 0.1_82_15 , ) -> List[Any]:
super().__init__()
# pass init params to Encoder
UpperCAmelCase_ : List[str] = Encoder(
in_channels=_UpperCamelCase , out_channels=_UpperCamelCase , down_block_types=_UpperCamelCase , block_out_channels=_UpperCamelCase , layers_per_block=_UpperCamelCase , act_fn=_UpperCamelCase , norm_num_groups=_UpperCamelCase , double_z=_UpperCamelCase , )
# pass init params to Decoder
UpperCAmelCase_ : Dict = Decoder(
in_channels=_UpperCamelCase , out_channels=_UpperCamelCase , up_block_types=_UpperCamelCase , block_out_channels=_UpperCamelCase , layers_per_block=_UpperCamelCase , norm_num_groups=_UpperCamelCase , act_fn=_UpperCamelCase , )
        UpperCAmelCase_ : Any = nn.Conv2d(2 * latent_channels , 2 * latent_channels , 1 )
        UpperCAmelCase_ : List[Any] = nn.Conv2d(_UpperCamelCase , _UpperCamelCase , 1 )
UpperCAmelCase_ : Any = False
UpperCAmelCase_ : int = False
# only relevant if vae tiling is enabled
UpperCAmelCase_ : Optional[int] = self.config.sample_size
UpperCAmelCase_ : int = (
self.config.sample_size[0]
if isinstance(self.config.sample_size , (list, tuple) )
else self.config.sample_size
)
UpperCAmelCase_ : Union[str, Any] = int(sample_size / (2 ** (len(self.config.block_out_channels ) - 1)) )
UpperCAmelCase_ : Optional[Any] = 0.25
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase=False ) -> List[str]:
if isinstance(_UpperCamelCase , (Encoder, Decoder) ):
UpperCAmelCase_ : Union[str, Any] = value
def __UpperCAmelCase ( self , _UpperCamelCase = True ) -> int:
UpperCAmelCase_ : Tuple = use_tiling
def __UpperCAmelCase ( self ) -> Dict:
self.enable_tiling(_UpperCamelCase )
def __UpperCAmelCase ( self ) -> Optional[Any]:
UpperCAmelCase_ : str = True
def __UpperCAmelCase ( self ) -> List[Any]:
UpperCAmelCase_ : Optional[int] = False
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def __UpperCAmelCase ( self ) -> Dict[str, AttentionProcessor]:
UpperCAmelCase_ : Optional[int] = {}
def fn_recursive_add_processors(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
if hasattr(_UpperCamelCase , 'set_processor' ):
UpperCAmelCase_ : Optional[int] = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(f"{name}.{sub_name}" , _UpperCamelCase , _UpperCamelCase )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
return processors
def __UpperCAmelCase ( self , _UpperCamelCase ) -> List[Any]:
UpperCAmelCase_ : Union[str, Any] = len(self.attn_processors.keys() )
if isinstance(_UpperCamelCase , _UpperCamelCase ) and len(_UpperCamelCase ) != count:
raise ValueError(
f"A dict of processors was passed, but the number of processors {len(_UpperCamelCase )} does not match the"
f" number of attention layers: {count}. Please make sure to pass {count} processor classes." )
def fn_recursive_attn_processor(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
if hasattr(_UpperCamelCase , 'set_processor' ):
if not isinstance(_UpperCamelCase , _UpperCamelCase ):
module.set_processor(_UpperCamelCase )
else:
module.set_processor(processor.pop(f"{name}.processor" ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(f"{name}.{sub_name}" , _UpperCamelCase , _UpperCamelCase )
for name, module in self.named_children():
fn_recursive_attn_processor(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
def __UpperCAmelCase ( self ) -> Union[str, Any]:
self.set_attn_processor(AttnProcessor() )
@apply_forward_hook
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = True ) -> AutoencoderKLOutput:
if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
return self.tiled_encode(_UpperCamelCase , return_dict=_UpperCamelCase )
if self.use_slicing and x.shape[0] > 1:
            UpperCAmelCase_ : Union[str, Any] = [self.encoder(x_slice ) for x_slice in x.split(1 )]
UpperCAmelCase_ : Tuple = torch.cat(_UpperCamelCase )
else:
UpperCAmelCase_ : List[Any] = self.encoder(_UpperCamelCase )
UpperCAmelCase_ : Optional[Any] = self.quant_conv(_UpperCamelCase )
UpperCAmelCase_ : Tuple = DiagonalGaussianDistribution(_UpperCamelCase )
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=_UpperCamelCase )
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = True ) -> Union[DecoderOutput, torch.FloatTensor]:
if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
return self.tiled_decode(_UpperCamelCase , return_dict=_UpperCamelCase )
UpperCAmelCase_ : str = self.post_quant_conv(_UpperCamelCase )
UpperCAmelCase_ : List[str] = self.decoder(_UpperCamelCase )
if not return_dict:
return (dec,)
return DecoderOutput(sample=_UpperCamelCase )
@apply_forward_hook
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = True ) -> Union[DecoderOutput, torch.FloatTensor]:
if self.use_slicing and z.shape[0] > 1:
            UpperCAmelCase_ : List[str] = [self._decode(z_slice ).sample for z_slice in z.split(1 )]
UpperCAmelCase_ : Dict = torch.cat(_UpperCamelCase )
else:
UpperCAmelCase_ : Any = self._decode(_UpperCamelCase ).sample
if not return_dict:
return (decoded,)
return DecoderOutput(sample=_UpperCamelCase )
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Any:
UpperCAmelCase_ : Tuple = min(a.shape[2] , b.shape[2] , _UpperCamelCase )
for y in range(_UpperCamelCase ):
UpperCAmelCase_ : str = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
return b
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Dict:
UpperCAmelCase_ : Tuple = min(a.shape[3] , b.shape[3] , _UpperCamelCase )
for x in range(_UpperCamelCase ):
UpperCAmelCase_ : int = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
return b
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = True ) -> AutoencoderKLOutput:
UpperCAmelCase_ : Any = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor) )
UpperCAmelCase_ : Tuple = int(self.tile_latent_min_size * self.tile_overlap_factor )
UpperCAmelCase_ : Optional[int] = self.tile_latent_min_size - blend_extent
# Split the image into 512x512 tiles and encode them separately.
UpperCAmelCase_ : List[str] = []
for i in range(0 , x.shape[2] , _UpperCamelCase ):
UpperCAmelCase_ : Any = []
for j in range(0 , x.shape[3] , _UpperCamelCase ):
UpperCAmelCase_ : Any = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
UpperCAmelCase_ : Dict = self.encoder(_UpperCamelCase )
UpperCAmelCase_ : List[str] = self.quant_conv(_UpperCamelCase )
row.append(_UpperCamelCase )
rows.append(_UpperCamelCase )
UpperCAmelCase_ : str = []
for i, row in enumerate(_UpperCamelCase ):
UpperCAmelCase_ : List[Any] = []
for j, tile in enumerate(_UpperCamelCase ):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
UpperCAmelCase_ : Dict = self.blend_v(rows[i - 1][j] , _UpperCamelCase , _UpperCamelCase )
if j > 0:
UpperCAmelCase_ : List[str] = self.blend_h(row[j - 1] , _UpperCamelCase , _UpperCamelCase )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(_UpperCamelCase , dim=3 ) )
UpperCAmelCase_ : Union[str, Any] = torch.cat(_UpperCamelCase , dim=2 )
UpperCAmelCase_ : List[Any] = DiagonalGaussianDistribution(_UpperCamelCase )
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=_UpperCamelCase )
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = True ) -> Union[DecoderOutput, torch.FloatTensor]:
UpperCAmelCase_ : str = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor) )
UpperCAmelCase_ : Dict = int(self.tile_sample_min_size * self.tile_overlap_factor )
UpperCAmelCase_ : Dict = self.tile_sample_min_size - blend_extent
# Split z into overlapping 64x64 tiles and decode them separately.
# The tiles have an overlap to avoid seams between tiles.
UpperCAmelCase_ : Union[str, Any] = []
for i in range(0 , z.shape[2] , _UpperCamelCase ):
UpperCAmelCase_ : List[str] = []
for j in range(0 , z.shape[3] , _UpperCamelCase ):
UpperCAmelCase_ : List[str] = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
UpperCAmelCase_ : Optional[Any] = self.post_quant_conv(_UpperCamelCase )
UpperCAmelCase_ : Tuple = self.decoder(_UpperCamelCase )
row.append(_UpperCamelCase )
rows.append(_UpperCamelCase )
UpperCAmelCase_ : Optional[Any] = []
for i, row in enumerate(_UpperCamelCase ):
UpperCAmelCase_ : List[Any] = []
for j, tile in enumerate(_UpperCamelCase ):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
UpperCAmelCase_ : Union[str, Any] = self.blend_v(rows[i - 1][j] , _UpperCamelCase , _UpperCamelCase )
if j > 0:
UpperCAmelCase_ : Optional[Any] = self.blend_h(row[j - 1] , _UpperCamelCase , _UpperCamelCase )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(_UpperCamelCase , dim=3 ) )
UpperCAmelCase_ : Dict = torch.cat(_UpperCamelCase , dim=2 )
if not return_dict:
return (dec,)
return DecoderOutput(sample=_UpperCamelCase )
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = False , _UpperCamelCase = True , _UpperCamelCase = None , ) -> Union[DecoderOutput, torch.FloatTensor]:
UpperCAmelCase_ : Optional[Any] = sample
UpperCAmelCase_ : Union[str, Any] = self.encode(_UpperCamelCase ).latent_dist
if sample_posterior:
UpperCAmelCase_ : str = posterior.sample(generator=_UpperCamelCase )
else:
UpperCAmelCase_ : int = posterior.mode()
UpperCAmelCase_ : Dict = self.decode(_UpperCamelCase ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=_UpperCamelCase )
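# Two standalone sketches of the mechanics above (assumed values, not part of the
# class). First, the linear cross-fade performed by blend_v/blend_h: over
# `blend_extent` rows the weight ramps from the previous tile `a` to tile `b`.
import torch

a = torch.ones(1, 1, 4, 4)   # bottom edge of the tile above
b = torch.zeros(1, 1, 4, 4)  # current tile
blend_extent = 2
for y in range(blend_extent):
    b[:, :, y, :] = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
assert b[0, 0, 0, 0].item() == 1.0 and b[0, 0, 1, 0].item() == 0.5  # full `a`, then halfway

# Second, what the encode path returns: a diagonal Gaussian over the latents,
# whose sample() is mean + std * eps (cf. DiagonalGaussianDistribution).
mean = torch.zeros(1, 4, 32, 32)
logvar = torch.zeros(1, 4, 32, 32)
z = mean + torch.exp(0.5 * logvar) * torch.randn_like(mean)
assert z.shape == mean.shape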
| 29 | 1 |
import os
import tempfile
import unittest
from typing import Any, Optional, Tuple
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(DEFAULT_REPO ) , '''Tatoeba directory does not exist.''' )
class lowerCamelCase (unittest.TestCase ):
'''simple docstring'''
@cached_property
def __UpperCAmelCase ( self ) -> str:
UpperCAmelCase_ : Any = tempfile.mkdtemp()
return TatoebaConverter(save_dir=_UpperCamelCase )
@slow
def __UpperCAmelCase ( self ) -> Optional[Any]:
self.resolver.convert_models(['heb-eng'] )
@slow
def __UpperCAmelCase ( self ) -> Tuple:
UpperCAmelCase_ , UpperCAmelCase_ : Any = self.resolver.write_model_card('opus-mt-he-en' , dry_run=_UpperCamelCase )
assert mmeta["long_pair"] == "heb-eng"
| 29 |
def lowercase__ ( a : int , b : int ):
    '''
    Return the bitwise AND of two non-negative integers as a binary string.

    >>> lowercase__(5, 3)
    '0b001'
    >>> lowercase__(25, 32)
    '0b000000'
    '''
    if a < 0 or b < 0:
        raise ValueError('the value of both inputs must be positive' )
    a_binary = str(bin(a ) )[2:]  # remove the leading "0b"
    b_binary = str(bin(b ) )[2:]  # remove the leading "0b"
    max_len = max(len(a_binary ) , len(b_binary ) )
    return "0b" + "".join(
        str(int(char_a == '1' and char_b == '1' ) )
        for char_a, char_b in zip(a_binary.zfill(max_len ) , b_binary.zfill(max_len ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 29 | 1 |
def solution(limit : int = 1_000_000 ):
    '''Count the reduced proper fractions n/d with d <= limit via a totient sieve.'''
    primes = set(range(3 , limit , 2 ) )
    primes.add(2 )
    for p in range(3 , limit , 2 ):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p , limit , p ) ) )
    phi = [float(n ) for n in range(limit + 1 )]
    for p in primes:
        for n in range(p , limit + 1 , p ):
            phi[n] *= 1 - 1 / p
    return int(sum(phi[2:] ) )
if __name__ == "__main__":
print(F'{solution() = }')
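# Quick sanity check (not in the original solution): phi(2..8) = 1, 2, 2, 4, 2,
# 6, 4, so the count of reduced proper fractions with denominator <= 8 is 21.
assert solution(8) == 21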
| 29 |
import json
from typing import Any, Dict, List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {'vocab_file': 'vocab.txt'}
__UpperCAmelCase = {
'vocab_file': {
'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt',
'YituTech/conv-bert-medium-small': (
'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'
),
'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt',
}
}
__UpperCAmelCase = {
'YituTech/conv-bert-base': 512,
'YituTech/conv-bert-medium-small': 512,
'YituTech/conv-bert-small': 512,
}
__UpperCAmelCase = {
'YituTech/conv-bert-base': {'do_lower_case': True},
'YituTech/conv-bert-medium-small': {'do_lower_case': True},
'YituTech/conv-bert-small': {'do_lower_case': True},
}
class lowerCamelCase (_snake_case ):
'''simple docstring'''
_snake_case : Optional[int] = VOCAB_FILES_NAMES
_snake_case : int = PRETRAINED_VOCAB_FILES_MAP
_snake_case : Dict = PRETRAINED_INIT_CONFIGURATION
_snake_case : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_snake_case : Any = ConvBertTokenizer
def __init__( self , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase=True , _UpperCamelCase="[UNK]" , _UpperCamelCase="[SEP]" , _UpperCamelCase="[PAD]" , _UpperCamelCase="[CLS]" , _UpperCamelCase="[MASK]" , _UpperCamelCase=True , _UpperCamelCase=None , **_UpperCamelCase , ) -> Dict:
super().__init__(
_UpperCamelCase , tokenizer_file=_UpperCamelCase , do_lower_case=_UpperCamelCase , unk_token=_UpperCamelCase , sep_token=_UpperCamelCase , pad_token=_UpperCamelCase , cls_token=_UpperCamelCase , mask_token=_UpperCamelCase , tokenize_chinese_chars=_UpperCamelCase , strip_accents=_UpperCamelCase , **_UpperCamelCase , )
UpperCAmelCase_ : Optional[int] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , _UpperCamelCase ) != do_lower_case
or normalizer_state.get('strip_accents' , _UpperCamelCase ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , _UpperCamelCase ) != tokenize_chinese_chars
):
UpperCAmelCase_ : Any = getattr(_UpperCamelCase , normalizer_state.pop('type' ) )
UpperCAmelCase_ : str = do_lower_case
UpperCAmelCase_ : List[Any] = strip_accents
UpperCAmelCase_ : str = tokenize_chinese_chars
UpperCAmelCase_ : Tuple = normalizer_class(**_UpperCamelCase )
UpperCAmelCase_ : Any = do_lower_case
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase=None ) -> List[str]:
UpperCAmelCase_ : int = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = None ) -> List[int]:
UpperCAmelCase_ : Union[str, Any] = [self.sep_token_id]
UpperCAmelCase_ : int = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = None ) -> Tuple[str]:
UpperCAmelCase_ : Any = self._tokenizer.model.save(_UpperCamelCase , name=_UpperCamelCase )
return tuple(_UpperCamelCase )
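# Illustrative sketch (ids are made up) of what the two helpers above produce for
# a sentence pair: input ids laid out as [CLS] A [SEP] B [SEP], with token type
# ids of 0 for the first segment (special tokens included) and 1 for the second.
cls_id, sep_id = 101, 102          # hypothetical [CLS] / [SEP] ids
ids_a, ids_b = [7, 8], [9]
input_ids = [cls_id] + ids_a + [sep_id] + ids_b + [sep_id]
token_type_ids = [0] * (len(ids_a) + 2) + [1] * (len(ids_b) + 1)
assert len(input_ids) == len(token_type_ids) == 6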
| 29 | 1 |
import math
from collections import defaultdict
from typing import Any, List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def lowercase__ ( __snake_case : Optional[Any] , __snake_case : List[str]=0.999 , __snake_case : str="cosine" , ):
'''simple docstring'''
if alpha_transform_type == "cosine":
def alpha_bar_fn(__snake_case : Union[str, Any] ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(__snake_case : Optional[int] ):
return math.exp(t * -12.0 )
else:
raise ValueError(F"Unsupported alpha_tranform_type: {alpha_transform_type}" )
UpperCAmelCase_ : Dict = []
for i in range(__snake_case ):
UpperCAmelCase_ : Dict = i / num_diffusion_timesteps
UpperCAmelCase_ : List[str] = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(__snake_case ) / alpha_bar_fn(__snake_case ) , __snake_case ) )
    return torch.tensor(__snake_case , dtype=torch.float32 )
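# Standalone check (assumed names, mirrors the cosine branch above): the cosine
# alpha-bar schedule yields betas in (0, max_beta] via 1 - abar(t2) / abar(t1).
import math

def cosine_betas(n, max_beta=0.999):
    alpha_bar = lambda t: math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2
    return [min(1 - alpha_bar((i + 1) / n) / alpha_bar(i / n), max_beta) for i in range(n)]

betas = cosine_betas(10)
assert len(betas) == 10 and all(0 < beta <= 0.999 for beta in betas)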
class lowerCamelCase (_snake_case , _snake_case ):
'''simple docstring'''
_snake_case : Union[str, Any] = [e.name for e in KarrasDiffusionSchedulers]
_snake_case : int = 2
@register_to_config
def __init__( self , _UpperCamelCase = 1_0_0_0 , _UpperCamelCase = 0.0_00_85 , _UpperCamelCase = 0.0_12 , _UpperCamelCase = "linear" , _UpperCamelCase = None , _UpperCamelCase = "epsilon" , _UpperCamelCase = "linspace" , _UpperCamelCase = 0 , ) -> Optional[Any]:
if trained_betas is not None:
            UpperCAmelCase_ : Any = torch.tensor(_UpperCamelCase , dtype=torch.float32 )
elif beta_schedule == "linear":
            UpperCAmelCase_ : Optional[int] = torch.linspace(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , dtype=torch.float32 )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
UpperCAmelCase_ : str = (
                torch.linspace(beta_start**0.5 , beta_end**0.5 , _UpperCamelCase , dtype=torch.float32 ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
UpperCAmelCase_ : List[str] = betas_for_alpha_bar(_UpperCamelCase )
else:
raise NotImplementedError(f"{beta_schedule} does is not implemented for {self.__class__}" )
UpperCAmelCase_ : Dict = 1.0 - self.betas
UpperCAmelCase_ : Tuple = torch.cumprod(self.alphas , dim=0 )
# set all values
self.set_timesteps(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase=None ) -> List[Any]:
if schedule_timesteps is None:
UpperCAmelCase_ : Optional[Any] = self.timesteps
UpperCAmelCase_ : Any = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
UpperCAmelCase_ : int = 1 if len(_UpperCamelCase ) > 1 else 0
else:
UpperCAmelCase_ : Union[str, Any] = timestep.cpu().item() if torch.is_tensor(_UpperCamelCase ) else timestep
UpperCAmelCase_ : Optional[int] = self._index_counter[timestep_int]
return indices[pos].item()
@property
def __UpperCAmelCase ( self ) -> Optional[Any]:
# standard deviation of the initial noise distribution
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , ) -> torch.FloatTensor:
UpperCAmelCase_ : List[Any] = self.index_for_timestep(_UpperCamelCase )
if self.state_in_first_order:
UpperCAmelCase_ : Dict = self.sigmas[step_index]
else:
UpperCAmelCase_ : Union[str, Any] = self.sigmas_interpol[step_index]
UpperCAmelCase_ : Optional[int] = sample / ((sigma**2 + 1) ** 0.5)
return sample
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = None , _UpperCamelCase = None , ) -> List[str]:
UpperCAmelCase_ : Dict = num_inference_steps
UpperCAmelCase_ : str = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
UpperCAmelCase_ : List[Any] = np.linspace(0 , num_train_timesteps - 1 , _UpperCamelCase , dtype=_UpperCamelCase )[::-1].copy()
elif self.config.timestep_spacing == "leading":
UpperCAmelCase_ : str = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
UpperCAmelCase_ : List[Any] = (np.arange(0 , _UpperCamelCase ) * step_ratio).round()[::-1].copy().astype(_UpperCamelCase )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
UpperCAmelCase_ : Optional[Any] = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
UpperCAmelCase_ : List[str] = (np.arange(_UpperCamelCase , 0 , -step_ratio )).round().copy().astype(_UpperCamelCase )
timesteps -= 1
else:
raise ValueError(
f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'." )
UpperCAmelCase_ : Union[str, Any] = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
UpperCAmelCase_ : List[str] = torch.from_numpy(np.log(_UpperCamelCase ) ).to(_UpperCamelCase )
UpperCAmelCase_ : List[Any] = np.interp(_UpperCamelCase , np.arange(0 , len(_UpperCamelCase ) ) , _UpperCamelCase )
        UpperCAmelCase_ : List[str] = np.concatenate([sigmas, [0.0]] ).astype(np.float32 )
UpperCAmelCase_ : str = torch.from_numpy(_UpperCamelCase ).to(device=_UpperCamelCase )
# interpolate sigmas
UpperCAmelCase_ : Optional[int] = sigmas.log().lerp(sigmas.roll(1 ).log() , 0.5 ).exp()
UpperCAmelCase_ : Optional[Any] = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2 ), sigmas[-1:]] )
UpperCAmelCase_ : Optional[int] = torch.cat(
[sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2 ), sigmas_interpol[-1:]] )
if str(_UpperCamelCase ).startswith('mps' ):
# mps does not support float64
            UpperCAmelCase_ : Optional[Any] = torch.from_numpy(_UpperCamelCase ).to(_UpperCamelCase , dtype=torch.float32 )
else:
UpperCAmelCase_ : Any = torch.from_numpy(_UpperCamelCase ).to(_UpperCamelCase )
# interpolate timesteps
UpperCAmelCase_ : Optional[int] = self.sigma_to_t(_UpperCamelCase ).to(_UpperCamelCase , dtype=timesteps.dtype )
UpperCAmelCase_ : Optional[Any] = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]) , dim=-1 ).flatten()
UpperCAmelCase_ : Tuple = torch.cat([timesteps[:1], interleaved_timesteps] )
UpperCAmelCase_ : Dict = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
UpperCAmelCase_ : Dict = defaultdict(_UpperCamelCase )
def __UpperCAmelCase ( self , _UpperCamelCase ) -> str:
# get log sigma
UpperCAmelCase_ : List[str] = sigma.log()
# get distribution
UpperCAmelCase_ : Any = log_sigma - self.log_sigmas[:, None]
# get sigmas range
UpperCAmelCase_ : int = dists.ge(0 ).cumsum(dim=0 ).argmax(dim=0 ).clamp(max=self.log_sigmas.shape[0] - 2 )
UpperCAmelCase_ : List[str] = low_idx + 1
UpperCAmelCase_ : List[str] = self.log_sigmas[low_idx]
UpperCAmelCase_ : int = self.log_sigmas[high_idx]
# interpolate sigmas
UpperCAmelCase_ : Optional[Any] = (low - log_sigma) / (low - high)
UpperCAmelCase_ : Any = w.clamp(0 , 1 )
# transform interpolation to time range
UpperCAmelCase_ : Union[str, Any] = (1 - w) * low_idx + w * high_idx
UpperCAmelCase_ : Optional[Any] = t.view(sigma.shape )
return t
@property
def __UpperCAmelCase ( self ) -> int:
return self.sample is None
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = True , ) -> Union[SchedulerOutput, Tuple]:
UpperCAmelCase_ : List[Any] = self.index_for_timestep(_UpperCamelCase )
# advance index counter by 1
UpperCAmelCase_ : Union[str, Any] = timestep.cpu().item() if torch.is_tensor(_UpperCamelCase ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
UpperCAmelCase_ : List[str] = self.sigmas[step_index]
UpperCAmelCase_ : List[str] = self.sigmas_interpol[step_index + 1]
UpperCAmelCase_ : Optional[Any] = self.sigmas[step_index + 1]
else:
# 2nd order / KDPM2's method
UpperCAmelCase_ : Union[str, Any] = self.sigmas[step_index - 1]
UpperCAmelCase_ : Any = self.sigmas_interpol[step_index]
UpperCAmelCase_ : List[Any] = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
UpperCAmelCase_ : str = 0
UpperCAmelCase_ : int = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
UpperCAmelCase_ : Dict = sigma_hat if self.state_in_first_order else sigma_interpol
UpperCAmelCase_ : Optional[int] = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
UpperCAmelCase_ : Any = sigma_hat if self.state_in_first_order else sigma_interpol
UpperCAmelCase_ : Optional[int] = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
raise NotImplementedError('prediction_type not implemented yet: sample' )
else:
raise ValueError(
f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`" )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
UpperCAmelCase_ : List[str] = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
UpperCAmelCase_ : Dict = sigma_interpol - sigma_hat
# store for 2nd order step
UpperCAmelCase_ : Any = sample
else:
# DPM-Solver-2
# 2. Convert to an ODE derivative for 2nd order
UpperCAmelCase_ : Union[str, Any] = (sample - pred_original_sample) / sigma_interpol
# 3. delta timestep
UpperCAmelCase_ : Any = sigma_next - sigma_hat
UpperCAmelCase_ : Dict = self.sample
UpperCAmelCase_ : str = None
UpperCAmelCase_ : List[str] = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=_UpperCamelCase )
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , ) -> torch.FloatTensor:
# Make sure sigmas and timesteps have the same device and dtype as original_samples
UpperCAmelCase_ : Tuple = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(_UpperCamelCase ):
# mps does not support float64
            UpperCAmelCase_ : Optional[Any] = self.timesteps.to(original_samples.device , dtype=torch.float32 )
            UpperCAmelCase_ : Union[str, Any] = timesteps.to(original_samples.device , dtype=torch.float32 )
else:
UpperCAmelCase_ : Any = self.timesteps.to(original_samples.device )
UpperCAmelCase_ : Optional[Any] = timesteps.to(original_samples.device )
        UpperCAmelCase_ : Any = [self.index_for_timestep(t , _UpperCamelCase ) for t in timesteps]
UpperCAmelCase_ : int = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
UpperCAmelCase_ : Optional[Any] = sigma.unsqueeze(-1 )
UpperCAmelCase_ : int = original_samples + noise * sigma
return noisy_samples
def __len__( self ) -> Union[str, Any]:
return self.config.num_train_timesteps
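# The add_noise method above applies the Karras-style forward process
# x_t = x_0 + sigma_t * eps; a standalone illustration with assumed shapes:
import torch

x0 = torch.zeros(2, 3, 8, 8)
eps = torch.randn(2, 3, 8, 8)
sigma = torch.tensor([0.5, 2.0]).view(-1, 1, 1, 1)  # one sigma per sample, broadcast over CHW
x_t = x0 + eps * sigma
assert x_t.shape == x0.shape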
| 29 |
from typing import List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
'snap-research/efficientformer-l1-300': (
'https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json'
),
}
class lowerCamelCase (_snake_case ):
'''simple docstring'''
_snake_case : Optional[int] = '''efficientformer'''
def __init__( self , _UpperCamelCase = [3, 2, 6, 4] , _UpperCamelCase = [4_8, 9_6, 2_2_4, 4_4_8] , _UpperCamelCase = [True, True, True, True] , _UpperCamelCase = 4_4_8 , _UpperCamelCase = 3_2 , _UpperCamelCase = 4 , _UpperCamelCase = 7 , _UpperCamelCase = 5 , _UpperCamelCase = 8 , _UpperCamelCase = 4 , _UpperCamelCase = 0.0 , _UpperCamelCase = 1_6 , _UpperCamelCase = 3 , _UpperCamelCase = 3 , _UpperCamelCase = 3 , _UpperCamelCase = 2 , _UpperCamelCase = 1 , _UpperCamelCase = 0.0 , _UpperCamelCase = 1 , _UpperCamelCase = True , _UpperCamelCase = True , _UpperCamelCase = 1E-5 , _UpperCamelCase = "gelu" , _UpperCamelCase = 0.02 , _UpperCamelCase = 1E-12 , _UpperCamelCase = 2_2_4 , _UpperCamelCase = 1E-05 , **_UpperCamelCase , ) -> None:
super().__init__(**_UpperCamelCase )
UpperCAmelCase_ : int = hidden_act
UpperCAmelCase_ : Union[str, Any] = hidden_dropout_prob
UpperCAmelCase_ : Tuple = hidden_sizes
UpperCAmelCase_ : Union[str, Any] = num_hidden_layers
UpperCAmelCase_ : List[str] = num_attention_heads
UpperCAmelCase_ : List[Any] = initializer_range
UpperCAmelCase_ : int = layer_norm_eps
UpperCAmelCase_ : List[str] = patch_size
UpperCAmelCase_ : Union[str, Any] = num_channels
UpperCAmelCase_ : Optional[Any] = depths
UpperCAmelCase_ : List[Any] = mlp_expansion_ratio
UpperCAmelCase_ : List[str] = downsamples
UpperCAmelCase_ : List[Any] = dim
UpperCAmelCase_ : Tuple = key_dim
UpperCAmelCase_ : Optional[int] = attention_ratio
UpperCAmelCase_ : str = resolution
UpperCAmelCase_ : Dict = pool_size
UpperCAmelCase_ : Union[str, Any] = downsample_patch_size
UpperCAmelCase_ : List[str] = downsample_stride
UpperCAmelCase_ : List[str] = downsample_pad
UpperCAmelCase_ : Any = drop_path_rate
        UpperCAmelCase_ : Dict = num_meta3d_blocks
UpperCAmelCase_ : Dict = distillation
UpperCAmelCase_ : int = use_layer_scale
UpperCAmelCase_ : Any = layer_scale_init_value
UpperCAmelCase_ : Any = image_size
UpperCAmelCase_ : Dict = batch_norm_eps
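# What a PretrainedConfig subclass like the one above boils down to: store every
# constructor argument as an attribute and pass the rest to the base class.
# Minimal standalone sketch (not the real transformers base class):
class MiniConfig:
    model_type = 'efficientformer'

    def __init__(self, hidden_sizes=(48, 96, 224, 448), image_size=224, **kwargs):
        self.hidden_sizes = list(hidden_sizes)
        self.image_size = image_size
        for key, value in kwargs.items():
            setattr(self, key, value)

cfg = MiniConfig(batch_norm_eps=1E-05)
assert cfg.hidden_sizes[0] == 48 and cfg.batch_norm_eps == 1E-05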
| 29 | 1 |
import argparse
import os
from typing import Dict, List, Optional, Tuple
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
__UpperCAmelCase = 16
__UpperCAmelCase = 32
def lowercase__ ( __snake_case : Accelerator , __snake_case : int = 16 ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = AutoTokenizer.from_pretrained('bert-base-cased' )
UpperCAmelCase_ : Any = load_dataset('glue' , 'mrpc' )
    def tokenize_function(examples : Tuple ):
# max_length=None => use the model max length (it's actually the default)
        UpperCAmelCase_ : Union[str, Any] = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=True , max_length=None )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
UpperCAmelCase_ : List[Any] = datasets.map(
__snake_case , batched=__snake_case , remove_columns=['idx', 'sentence1', 'sentence2'] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
UpperCAmelCase_ : List[str] = tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(__snake_case : Dict ):
# On TPU it's best to pad everything to the same length or training will be very slow.
UpperCAmelCase_ : Optional[int] = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
UpperCAmelCase_ : int = 16
elif accelerator.mixed_precision != "no":
UpperCAmelCase_ : Union[str, Any] = 8
else:
UpperCAmelCase_ : List[str] = None
return tokenizer.pad(
__snake_case , padding='longest' , max_length=__snake_case , pad_to_multiple_of=__snake_case , return_tensors='pt' , )
# Instantiate dataloaders.
UpperCAmelCase_ : int = DataLoader(
tokenized_datasets['train'] , shuffle=__snake_case , collate_fn=__snake_case , batch_size=__snake_case )
UpperCAmelCase_ : Tuple = DataLoader(
tokenized_datasets['validation'] , shuffle=__snake_case , collate_fn=__snake_case , batch_size=__snake_case )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
__UpperCAmelCase = mocked_dataloaders # noqa: F811
def lowercase__ ( __snake_case : List[str] , __snake_case : Optional[int] ):
'''simple docstring'''
if os.environ.get('TESTING_MOCKED_DATALOADERS' , __snake_case ) == "1":
UpperCAmelCase_ : Optional[int] = 2
# New Code #
UpperCAmelCase_ : int = int(args.gradient_accumulation_steps )
UpperCAmelCase_ : List[Any] = int(args.local_sgd_steps )
# Initialize accelerator
UpperCAmelCase_ : Dict = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=__snake_case )
if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
raise NotImplementedError('LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)' )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
UpperCAmelCase_ : List[str] = config['lr']
UpperCAmelCase_ : str = int(config['num_epochs'] )
UpperCAmelCase_ : str = int(config['seed'] )
UpperCAmelCase_ : str = int(config['batch_size'] )
UpperCAmelCase_ : List[str] = evaluate.load('glue' , 'mrpc' )
set_seed(__snake_case )
UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = get_dataloaders(__snake_case , __snake_case )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
UpperCAmelCase_ : Optional[Any] = AutoModelForSequenceClassification.from_pretrained('bert-base-cased' , return_dict=__snake_case )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
UpperCAmelCase_ : Any = model.to(accelerator.device )
# Instantiate optimizer
UpperCAmelCase_ : Optional[Any] = AdamW(params=model.parameters() , lr=__snake_case )
# Instantiate scheduler
UpperCAmelCase_ : Tuple = get_linear_schedule_with_warmup(
optimizer=__snake_case , num_warmup_steps=100 , num_training_steps=(len(__snake_case ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[str] = accelerator.prepare(
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case )
# Now we train the model
for epoch in range(__snake_case ):
model.train()
with LocalSGD(
accelerator=__snake_case , model=__snake_case , local_sgd_steps=__snake_case , enabled=local_sgd_steps is not None ) as local_sgd:
for step, batch in enumerate(__snake_case ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
# New code #
# We use the new `accumulate` context manager to perform gradient accumulation
# We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
with accelerator.accumulate(__snake_case ):
                    UpperCAmelCase_ : str = model(**batch )
UpperCAmelCase_ : List[str] = output.loss
accelerator.backward(__snake_case )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
# LocalSGD-specific line
local_sgd.step()
model.eval()
for step, batch in enumerate(__snake_case ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
            UpperCAmelCase_ : Any = model(**batch )
UpperCAmelCase_ : Union[str, Any] = outputs.logits.argmax(dim=-1 )
UpperCAmelCase_ , UpperCAmelCase_ : str = accelerator.gather_for_metrics((predictions, batch['labels']) )
metric.add_batch(
predictions=__snake_case , references=__snake_case , )
UpperCAmelCase_ : Tuple = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F"epoch {epoch}:" , __snake_case )
def lowercase__ ( ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = argparse.ArgumentParser(description='Simple example of training script.' )
parser.add_argument(
        '--mixed_precision' , type=__snake_case , default=__snake_case , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose '
        'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10 '
        'and an Nvidia Ampere GPU.' , )
# New Code #
parser.add_argument(
'--gradient_accumulation_steps' , type=__snake_case , default=1 , help='The number of minibatches to be ran before gradients are accumulated.' , )
parser.add_argument(
'--local_sgd_steps' , type=__snake_case , default=8 , help='Number of local SGD steps or None to disable local SGD' )
parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.' )
UpperCAmelCase_ : Dict = parser.parse_args()
UpperCAmelCase_ : List[Any] = {'lr': 2E-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16}
training_function(__snake_case , __snake_case )
if __name__ == "__main__":
main()
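# The idea behind LocalSGD above, in miniature (library-free sketch): each worker
# takes `local_sgd_steps` optimizer steps on its own, then parameters are averaged.
import torch

worker_params = [torch.tensor([1.0]), torch.tensor([3.0])]  # one "model" per worker after K local steps
synced = sum(worker_params) / len(worker_params)            # the periodic synchronization
worker_params = [synced.clone() for _ in worker_params]
assert worker_params[0].item() == 2.0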
| 29 |
from dataclasses import dataclass
from typing import Any, List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
__UpperCAmelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
__UpperCAmelCase = '\n Examples:\n ```py\n >>> from PIL import Image\n >>> import torch\n >>> from diffusers import DiffusionPipeline\n >>> from diffusers.utils import export_to_gif, load_image\n\n >>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")\n\n >>> repo = "openai/shap-e-img2img"\n >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)\n >>> pipe = pipe.to(device)\n\n >>> guidance_scale = 3.0\n >>> image_url = "https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png"\n >>> image = load_image(image_url).convert("RGB")\n\n >>> images = pipe(\n ... image,\n ... guidance_scale=guidance_scale,\n ... num_inference_steps=64,\n ... frame_size=256,\n ... ).images\n\n >>> gif_path = export_to_gif(images[0], "corgi_3d.gif")\n ```\n'
@dataclass
class lowerCamelCase (_snake_case ):
'''simple docstring'''
_snake_case : Union[PIL.Image.Image, np.ndarray]
class lowerCamelCase (_snake_case ):
'''simple docstring'''
def __init__( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , ) -> Any:
super().__init__()
self.register_modules(
prior=_UpperCamelCase , image_encoder=_UpperCamelCase , image_processor=_UpperCamelCase , scheduler=_UpperCamelCase , renderer=_UpperCamelCase , )
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> List[Any]:
if latents is None:
UpperCAmelCase_ : str = randn_tensor(_UpperCamelCase , generator=_UpperCamelCase , device=_UpperCamelCase , dtype=_UpperCamelCase )
else:
if latents.shape != shape:
raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}" )
UpperCAmelCase_ : Tuple = latents.to(_UpperCamelCase )
UpperCAmelCase_ : Tuple = latents * scheduler.init_noise_sigma
return latents
def __UpperCAmelCase ( self , _UpperCamelCase=0 ) -> Union[str, Any]:
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`' )
UpperCAmelCase_ : int = torch.device(f"cuda:{gpu_id}" )
UpperCAmelCase_ : int = [self.image_encoder, self.prior]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(_UpperCamelCase , _UpperCamelCase )
@property
def __UpperCAmelCase ( self ) -> int:
if self.device != torch.device('meta' ) or not hasattr(self.image_encoder , '_hf_hook' ):
return self.device
for module in self.image_encoder.modules():
if (
hasattr(_UpperCamelCase , '_hf_hook' )
and hasattr(module._hf_hook , 'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , ) -> str:
if isinstance(_UpperCamelCase , _UpperCamelCase ) and isinstance(image[0] , torch.Tensor ):
UpperCAmelCase_ : int = torch.cat(_UpperCamelCase , axis=0 ) if image[0].ndim == 4 else torch.stack(_UpperCamelCase , axis=0 )
if not isinstance(_UpperCamelCase , torch.Tensor ):
UpperCAmelCase_ : Optional[int] = self.image_processor(_UpperCamelCase , return_tensors='pt' ).pixel_values[0].unsqueeze(0 )
UpperCAmelCase_ : Tuple = image.to(dtype=self.image_encoder.dtype , device=_UpperCamelCase )
UpperCAmelCase_ : Optional[Any] = self.image_encoder(_UpperCamelCase )['last_hidden_state']
UpperCAmelCase_ : Union[str, Any] = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256
UpperCAmelCase_ : List[str] = image_embeds.repeat_interleave(_UpperCamelCase , dim=0 )
if do_classifier_free_guidance:
UpperCAmelCase_ : Dict = torch.zeros_like(_UpperCamelCase )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
UpperCAmelCase_ : Optional[int] = torch.cat([negative_image_embeds, image_embeds] )
return image_embeds
@torch.no_grad()
@replace_example_docstring(_UpperCamelCase )
def __call__( self , _UpperCamelCase , _UpperCamelCase = 1 , _UpperCamelCase = 2_5 , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = 4.0 , _UpperCamelCase = 6_4 , _UpperCamelCase = "pil" , _UpperCamelCase = True , ) -> Union[str, Any]:
if isinstance(_UpperCamelCase , PIL.Image.Image ):
UpperCAmelCase_ : Tuple = 1
elif isinstance(_UpperCamelCase , torch.Tensor ):
UpperCAmelCase_ : str = image.shape[0]
elif isinstance(_UpperCamelCase , _UpperCamelCase ) and isinstance(image[0] , (torch.Tensor, PIL.Image.Image) ):
UpperCAmelCase_ : Optional[int] = len(_UpperCamelCase )
else:
raise ValueError(
f"`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(_UpperCamelCase )}" )
UpperCAmelCase_ : Tuple = self._execution_device
UpperCAmelCase_ : str = batch_size * num_images_per_prompt
UpperCAmelCase_ : str = guidance_scale > 1.0
UpperCAmelCase_ : str = self._encode_image(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# prior
self.scheduler.set_timesteps(_UpperCamelCase , device=_UpperCamelCase )
UpperCAmelCase_ : int = self.scheduler.timesteps
UpperCAmelCase_ : int = self.prior.config.num_embeddings
UpperCAmelCase_ : Any = self.prior.config.embedding_dim
UpperCAmelCase_ : List[str] = self.prepare_latents(
(batch_size, num_embeddings * embedding_dim) , image_embeds.dtype , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , self.scheduler , )
# YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
UpperCAmelCase_ : List[Any] = latents.reshape(latents.shape[0] , _UpperCamelCase , _UpperCamelCase )
for i, t in enumerate(self.progress_bar(_UpperCamelCase ) ):
# expand the latents if we are doing classifier free guidance
UpperCAmelCase_ : Tuple = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
UpperCAmelCase_ : Optional[Any] = self.scheduler.scale_model_input(_UpperCamelCase , _UpperCamelCase )
UpperCAmelCase_ : int = self.prior(
_UpperCamelCase , timestep=_UpperCamelCase , proj_embedding=_UpperCamelCase , ).predicted_image_embedding
# remove the variance
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = noise_pred.split(
scaled_model_input.shape[2] , dim=2 ) # batch_size, num_embeddings, embedding_dim
            if do_classifier_free_guidance:
UpperCAmelCase_ , UpperCAmelCase_ : str = noise_pred.chunk(2 )
UpperCAmelCase_ : List[Any] = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
UpperCAmelCase_ : List[str] = self.scheduler.step(
_UpperCamelCase , timestep=_UpperCamelCase , sample=_UpperCamelCase , ).prev_sample
if output_type == "latent":
return ShapEPipelineOutput(images=_UpperCamelCase )
UpperCAmelCase_ : List[Any] = []
for i, latent in enumerate(_UpperCamelCase ):
UpperCAmelCase_ : List[str] = self.renderer.decode(
latent[None, :] , _UpperCamelCase , size=_UpperCamelCase , ray_batch_size=4_0_9_6 , n_coarse_samples=6_4 , n_fine_samples=1_2_8 , )
images.append(_UpperCamelCase )
UpperCAmelCase_ : Optional[int] = torch.stack(_UpperCamelCase )
if output_type not in ["np", "pil"]:
raise ValueError(f"Only the output types `pil` and `np` are supported not output_type={output_type}" )
UpperCAmelCase_ : Dict = images.cpu().numpy()
if output_type == "pil":
UpperCAmelCase_ : List[str] = [self.numpy_to_pil(_UpperCamelCase ) for image in images]
# Offload last model to CPU
if hasattr(self , 'final_offload_hook' ) and self.final_offload_hook is not None:
self.final_offload_hook.offload()
if not return_dict:
return (images,)
return ShapEPipelineOutput(images=_UpperCamelCase )
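# Classifier-free guidance as used in the denoising loop above, in isolation:
# the latents are duplicated (unconditional + conditional) and the two noise
# predictions are recombined as uncond + scale * (cond - uncond).
import torch

noise_uncond = torch.zeros(1, 4)
noise_cond = torch.ones(1, 4)
guidance_scale = 4.0
guided = noise_uncond + guidance_scale * (noise_cond - noise_uncond)
assert guided[0, 0].item() == 4.0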
| 29 | 1 |
import os
import pytest
from typing import Any, List, Tuple

from attr import dataclass
__UpperCAmelCase = 'us-east-1' # defaults region
@dataclass
class lowerCamelCase :
'''simple docstring'''
_snake_case : str
_snake_case : List[Any] = '''arn:aws:iam::558105141721:role/sagemaker_execution_role'''
_snake_case : Any = {
'''task_name''': '''mnli''',
'''per_device_train_batch_size''': 1_6,
'''per_device_eval_batch_size''': 1_6,
'''do_train''': True,
'''do_eval''': True,
'''do_predict''': True,
'''output_dir''': '''/opt/ml/model''',
'''overwrite_output_dir''': True,
'''max_steps''': 5_0_0,
'''save_steps''': 5_5_0_0,
}
_snake_case : Tuple = {**hyperparameters, '''max_steps''': 1_0_0_0}
@property
def __UpperCAmelCase ( self ) -> str:
if self.framework == "pytorch":
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
{"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
]
else:
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
{"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
]
@property
def __UpperCAmelCase ( self ) -> str:
return f"{self.framework}-transfromers-test"
@property
def __UpperCAmelCase ( self ) -> str:
return f"./tests/sagemaker/scripts/{self.framework}"
@property
def __UpperCAmelCase ( self ) -> str:
if self.framework == "pytorch":
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
else:
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"
@pytest.fixture(scope='class' )
def lowercase__ ( __snake_case : Tuple ):
'''simple docstring'''
UpperCAmelCase_ : Any = SageMakerTestEnvironment(framework=request.cls.framework )
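# How SageMaker consumes the metric definitions above (standalone sketch): each
# "Regex" is applied to lines of the training log and group(1) becomes the value.
import re

log_line = 'train_runtime = 218.7'
match = re.search(r'train_runtime.*=\D*(.*?)$', log_line)
assert match is not None and match.group(1) == '218.7'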
| 29 |
import random
import unittest
from typing import Any, Dict, List, Optional, Union
import torch
from diffusers import IFImg2ImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class lowerCamelCase (_snake_case , _snake_case , unittest.TestCase ):
'''simple docstring'''
    _snake_case : Union[str, Any] = IFImg2ImgSuperResolutionPipeline
_snake_case : Union[str, Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''width''', '''height'''}
_snake_case : List[Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'''original_image'''} )
_snake_case : List[str] = PipelineTesterMixin.required_optional_params - {'''latents'''}
def __UpperCAmelCase ( self ) -> Optional[Any]:
return self._get_superresolution_dummy_components()
    def get_dummy_inputs( self , device , seed=0 ) -> Any:
        if str(device ).startswith('mps' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        image = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(seed ) ).to(device )
        original_image = floats_tensor((1, 3, 1_6, 1_6) , rng=random.Random(seed ) ).to(device )
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'image': image,
            'original_image': original_image,
            'generator': generator,
            'num_inference_steps': 2,
            'output_type': 'numpy',
        }
        return inputs
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
    def test_xformers_attention_forwardGenerator_pass( self ) -> Any:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
    def test_save_load_optional_components( self ) -> Dict:
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' )
    def test_save_load_float16( self ) -> str:
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1E-1 )
    def test_attention_slicing_forward_pass( self ) -> List[Any]:
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
    def test_save_load_local( self ) -> Union[str, Any]:
self._test_save_load_local()
    def test_inference_batch_single_identical( self ) -> Dict:
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
| 29 | 1 |
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
_unmatched = object()
# For specifying empty leaf dict `{}`
empty_dict = object()
def _match( qs , ks ):
    '''simple docstring'''
    qts = tuple(re.compile(x + '$' ) for x in qs )
    for i in range(len(ks ) - len(qs ) + 1 ):
        matches = [x.match(y ) for x, y in zip(qts , ks[i:] )]
        if matches and all(matches ):
            return True
    return False
def _replacement_rules( rules ):
    '''simple docstring'''
    def replace(key , val ):
        for rule, replacement in rules:
            if _match(rule , key ):
                return replacement
        return val
    return replace
def _get_partition_rules():
    '''simple docstring'''
    return [
        # embeddings
        (("transformer", "wpe", "embedding"), P('mp' , None )),
        (("transformer", "wte", "embedding"), P('mp' , None )),
        # attention
        (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(None , 'mp' )),
        (("attention", "out_proj", "kernel"), P('mp' , None )),
        (("attention", "out_proj", "bias"), None),
        # mlp
        (("mlp", "c_fc", "kernel"), P(None , 'mp' )),
        (("mlp", "c_fc", "bias"), P('mp' )),
        (("mlp", "c_proj", "kernel"), P('mp' , None )),
        (("mlp", "c_proj", "bias"), None),
        # layer norms
        ((r"ln_\d+", "bias"), None),
        ((r"\d+", r"ln_\d+", "scale"), None),
        (("ln_f", "bias"), None),
        (("ln_f", "scale"), None),
    ]
def set_partitions( in_dict ):
    '''simple docstring'''
    rules = _get_partition_rules()
    replace = _replacement_rules(rules )
    initd = {k: _unmatched for k in flatten_dict(in_dict )}
    result = {k: replace(k , v ) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(result ) )
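# Example (sketch): the windowed-regex matcher above finds a rule suffix
# anywhere inside a flattened parameter path.
print(_match(('attention', 'out_proj', 'kernel') ,
             ('transformer', 'h', '0', 'attention', 'out_proj', 'kernel') ))  # True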
| 29 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_time_series_transformer': [
'TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'TimeSeriesTransformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_time_series_transformer'] = [
'TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TimeSeriesTransformerForPrediction',
'TimeSeriesTransformerModel',
'TimeSeriesTransformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
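# Illustrative sketch (a simplified stand-in, not the real
# `transformers.utils._LazyModule`): attribute access triggers the deferred
# submodule import.
import importlib
import types

class _LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        submodule = importlib.import_module('.' + self._attr_to_module[attr], self.__name__)
        return getattr(submodule, attr)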
| 29 | 1 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
'hustvl/yolos-small': 'https://huggingface.co/hustvl/yolos-small/resolve/main/config.json',
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class lowerCamelCase (_snake_case ):
'''simple docstring'''
_snake_case : Union[str, Any] = '''yolos'''
    def __init__( self , hidden_size=7_6_8 , num_hidden_layers=1_2 , num_attention_heads=1_2 , intermediate_size=3_0_7_2 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1E-12 , image_size=[5_1_2, 8_6_4] , patch_size=1_6 , num_channels=3 , qkv_bias=True , num_detection_tokens=1_0_0 , use_mid_position_embeddings=True , auxiliary_loss=False , class_cost=1 , bbox_cost=5 , giou_cost=2 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.1 , **kwargs , ):
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.num_detection_tokens = num_detection_tokens
        self.use_mid_position_embeddings = use_mid_position_embeddings
        self.auxiliary_loss = auxiliary_loss
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
class lowerCamelCase (_snake_case ):
'''simple docstring'''
_snake_case : List[Any] = version.parse('''1.11''' )
@property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
    def atol_for_validation( self ) -> float:
return 1E-4
@property
    def default_onnx_opset( self ) -> int:
return 1_2
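# Usage sketch: under its public name `YolosConfig` in transformers, the
# defaults above correspond to e.g.
# from transformers import YolosConfig
# config = YolosConfig(num_detection_tokens=1_0_0, image_size=[5_1_2, 8_6_4])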
| 29 |
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
    'tensor(bool)': np.bool_,
    'tensor(int8)': np.int8,
    'tensor(uint8)': np.uint8,
    'tensor(int16)': np.int16,
    'tensor(uint16)': np.uint16,
    'tensor(int32)': np.int32,
    'tensor(uint32)': np.uint32,
    'tensor(int64)': np.int64,
    'tensor(uint64)': np.uint64,
    'tensor(float16)': np.float16,
    'tensor(float)': np.float32,
    'tensor(double)': np.float64,
}
class OnnxRuntimeModel :
'''simple docstring'''
    def __init__( self , model=None , **kwargs ) -> None:
        logger.info('`diffusers.OnnxRuntimeModel` is experimental and might change in the future.' )
        self.model = model
        self.model_save_dir = kwargs.get('model_save_dir' , None )
        self.latest_model_name = kwargs.get('latest_model_name' , ONNX_WEIGHTS_NAME )
    def __call__( self , **kwargs ):
        inputs = {k: np.array(v ) for k, v in kwargs.items()}
        return self.model.run(None , inputs )
@staticmethod
    def load_model( path , provider=None , sess_options=None ):
        if provider is None:
            logger.info('No onnxruntime provider specified, using CPUExecutionProvider' )
            provider = 'CPUExecutionProvider'
        return ort.InferenceSession(path , providers=[provider] , sess_options=sess_options )
    def _save_pretrained( self , save_directory , file_name = None , **kwargs ):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        src_path = self.model_save_dir.joinpath(self.latest_model_name )
        dst_path = Path(save_directory ).joinpath(model_file_name )
        try:
            shutil.copyfile(src_path , dst_path )
        except shutil.SameFileError:
            pass
        # copy external weights (for models >2GB)
        src_path = self.model_save_dir.joinpath(ONNX_EXTERNAL_WEIGHTS_NAME )
        if src_path.exists():
            dst_path = Path(save_directory ).joinpath(ONNX_EXTERNAL_WEIGHTS_NAME )
            try:
                shutil.copyfile(src_path , dst_path )
            except shutil.SameFileError:
                pass
    def save_pretrained( self , save_directory , **kwargs ):
        if os.path.isfile(save_directory ):
            logger.error(f"Provided path ({save_directory}) should be a directory, not a file" )
            return
        os.makedirs(save_directory , exist_ok=True )
        # saving model weights/files
        self._save_pretrained(save_directory , **kwargs )
@classmethod
    def _from_pretrained( cls , model_id , use_auth_token = None , revision = None , force_download = False , cache_dir = None , file_name = None , provider = None , sess_options = None , **kwargs ):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        # load model from local directory
        if os.path.isdir(model_id ):
            model = OnnxRuntimeModel.load_model(
                os.path.join(model_id , model_file_name ) , provider=provider , sess_options=sess_options )
            kwargs['model_save_dir'] = Path(model_id )
        # load model from hub
        else:
            # download model
            model_cache_path = hf_hub_download(
                repo_id=model_id , filename=model_file_name , use_auth_token=use_auth_token , revision=revision , cache_dir=cache_dir , force_download=force_download , )
            kwargs['model_save_dir'] = Path(model_cache_path ).parent
            kwargs['latest_model_name'] = Path(model_cache_path ).name
            model = OnnxRuntimeModel.load_model(model_cache_path , provider=provider , sess_options=sess_options )
        return cls(model=model , **kwargs )
@classmethod
    def from_pretrained( cls , model_id , force_download = True , use_auth_token = None , cache_dir = None , **model_kwargs ):
        revision = None
        if len(str(model_id ).split('@' ) ) == 2:
            model_id , revision = model_id.split('@' )
        return cls._from_pretrained(
            model_id=model_id , revision=revision , cache_dir=cache_dir , force_download=force_download , use_auth_token=use_auth_token , **model_kwargs , )
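# Usage sketch (the local directory below is an assumption): load an exported
# ONNX model and run it with numpy keyword inputs.
# model = OnnxRuntimeModel.from_pretrained('./onnx_model', provider='CPUExecutionProvider')
# outputs = model(input_ids=np.ones((1, 8), dtype=np.int64))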
| 29 | 1 |
def text_justification( word : str , max_width : int ) -> list:
    '''simple docstring'''
    words = word.split()
    def justify(line : list , width : int , max_width : int ) -> str:
        overall_spaces_count = max_width - width
        words_count = len(line )
        if len(line ) == 1:
            # if there is only one word in the line,
            # just insert overall_spaces_count for the remainder of the line
            return line[0] + " " * overall_spaces_count
        else:
            spaces_to_insert_between_words = words_count - 1
            # num_spaces_between_words_list[i] : tells you to insert
            # num_spaces_between_words_list[i] spaces
            # after word on line[i]
            num_spaces_between_words_list = spaces_to_insert_between_words * [
                overall_spaces_count // spaces_to_insert_between_words
            ]
            spaces_count_in_locations = (
                overall_spaces_count % spaces_to_insert_between_words
            )
            # distribute spaces via round robin to the left words
            for i in range(spaces_count_in_locations ):
                num_spaces_between_words_list[i] += 1
            aligned_words_list = []
            for i in range(spaces_to_insert_between_words ):
                # add the word
                aligned_words_list.append(line[i] )
                # add the spaces to insert
                aligned_words_list.append(num_spaces_between_words_list[i] * ' ' )
            # just add the last word to the sentence
            aligned_words_list.append(line[-1] )
            # join the aligned words list to form a justified line
            return "".join(aligned_words_list )
    answer = []
    line : list[str] = []
    width = 0
    for word in words:
        if width + len(word ) + len(line ) <= max_width:
            # keep adding words until we can fill out max_width
            # width = sum of length of all words (without overall_spaces_count)
            # len(word) = length of current word
            # len(line) = number of overall_spaces_count to insert between words
            line.append(word )
            width += len(word )
        else:
            # justify the line and add it to result
            answer.append(justify(line , width , max_width ) )
            # reset new line and new width
            line , width = [word], len(word )
    remaining_spaces = max_width - width - len(line )
    answer.append(' '.join(line ) + (remaining_spaces + 1) * ' ' )
    return answer
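# Example (sketch): justify a short sentence to a width of 16 characters.
print(text_justification('This is an example of text justification.' , 1_6 ))
# -> ['This    is    an', 'example  of text', 'justification.  ']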
if __name__ == "__main__":
from doctest import testmod
testmod()
| 29 |
import contextlib
import csv
import json
import os
import sqlite3
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope='session' )
def lowercase__ ( ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = 10
UpperCAmelCase_ : Tuple = datasets.Features(
{
'tokens': datasets.Sequence(datasets.Value('string' ) ),
'labels': datasets.Sequence(datasets.ClassLabel(names=['negative', 'positive'] ) ),
'answers': datasets.Sequence(
{
'text': datasets.Value('string' ),
'answer_start': datasets.Value('int32' ),
} ),
'id': datasets.Value('int64' ),
} )
UpperCAmelCase_ : Tuple = datasets.Dataset.from_dict(
{
'tokens': [['foo'] * 5] * n,
'labels': [[1] * 5] * n,
'answers': [{'answer_start': [97], 'text': ['1976']}] * 10,
'id': list(range(__snake_case ) ),
} , features=__snake_case , )
return dataset
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Optional[Any] , __snake_case : List[str] ):
'''simple docstring'''
UpperCAmelCase_ : str = str(tmp_path_factory.mktemp('data' ) / 'file.arrow' )
dataset.map(cache_file_name=__snake_case )
return filename
# FILE_CONTENT + files
FILE_CONTENT = '\\n Text data.\n Second line of data.'
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = tmp_path_factory.mktemp('data' ) / 'file.txt'
UpperCAmelCase_ : Tuple = FILE_CONTENT
with open(__snake_case , 'w' ) as f:
f.write(__snake_case )
return filename
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : List[str] ):
'''simple docstring'''
    import bz2
    UpperCAmelCase_ : Optional[int] = tmp_path_factory.mktemp('data' ) / 'file.txt.bz2'
    UpperCAmelCase_ : str = bytes(__snake_case , 'utf-8' )
    with bz2.open(__snake_case , 'wb' ) as f:
f.write(__snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Any ):
'''simple docstring'''
import gzip
UpperCAmelCase_ : Optional[Any] = str(tmp_path_factory.mktemp('data' ) / 'file.txt.gz' )
UpperCAmelCase_ : Dict = bytes(__snake_case , 'utf-8' )
with gzip.open(__snake_case , 'wb' ) as f:
f.write(__snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Dict ):
'''simple docstring'''
if datasets.config.LZ4_AVAILABLE:
        import lz4.frame
        UpperCAmelCase_ : Any = tmp_path_factory.mktemp('data' ) / 'file.txt.lz4'
        UpperCAmelCase_ : Any = bytes(__snake_case , 'utf-8' )
        with lz4.frame.open(__snake_case , 'wb' ) as f:
f.write(__snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Tuple , __snake_case : List[Any] ):
'''simple docstring'''
if datasets.config.PY7ZR_AVAILABLE:
        import py7zr
        UpperCAmelCase_ : Optional[Any] = tmp_path_factory.mktemp('data' ) / 'file.txt.7z'
        with py7zr.SevenZipFile(__snake_case , 'w' ) as archive:
archive.write(__snake_case , arcname=os.path.basename(__snake_case ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : List[str] , __snake_case : List[Any] ):
'''simple docstring'''
import tarfile
UpperCAmelCase_ : Any = tmp_path_factory.mktemp('data' ) / 'file.txt.tar'
with tarfile.TarFile(__snake_case , 'w' ) as f:
f.add(__snake_case , arcname=os.path.basename(__snake_case ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : str ):
'''simple docstring'''
import lzma
UpperCAmelCase_ : Union[str, Any] = tmp_path_factory.mktemp('data' ) / 'file.txt.xz'
UpperCAmelCase_ : Any = bytes(__snake_case , 'utf-8' )
with lzma.open(__snake_case , 'wb' ) as f:
f.write(__snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Optional[int] , __snake_case : Optional[Any] ):
'''simple docstring'''
import zipfile
UpperCAmelCase_ : int = tmp_path_factory.mktemp('data' ) / 'file.txt.zip'
with zipfile.ZipFile(__snake_case , 'w' ) as f:
f.write(__snake_case , arcname=os.path.basename(__snake_case ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Optional[Any] ):
'''simple docstring'''
if datasets.config.ZSTANDARD_AVAILABLE:
import zstandard as zstd
UpperCAmelCase_ : Tuple = tmp_path_factory.mktemp('data' ) / 'file.txt.zst'
UpperCAmelCase_ : List[str] = bytes(__snake_case , 'utf-8' )
with zstd.open(__snake_case , 'wb' ) as f:
f.write(__snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Any ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = tmp_path_factory.mktemp('data' ) / 'file.xml'
UpperCAmelCase_ : List[Any] = textwrap.dedent(
'\\n <?xml version="1.0" encoding="UTF-8" ?>\n <tmx version="1.4">\n <header segtype="sentence" srclang="ca" />\n <body>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 1</seg></tuv>\n <tuv xml:lang="en"><seg>Content 1</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 2</seg></tuv>\n <tuv xml:lang="en"><seg>Content 2</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 3</seg></tuv>\n <tuv xml:lang="en"><seg>Content 3</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 4</seg></tuv>\n <tuv xml:lang="en"><seg>Content 4</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 5</seg></tuv>\n <tuv xml:lang="en"><seg>Content 5</seg></tuv>\n </tu>\n </body>\n </tmx>' )
with open(__snake_case , 'w' ) as f:
f.write(__snake_case )
return filename
DATA = [
{'col_1': '0', 'col_2': 0, 'col_3': 0.0},
{'col_1': '1', 'col_2': 1, 'col_3': 1.0},
{'col_1': '2', 'col_2': 2, 'col_3': 2.0},
{'col_1': '3', 'col_2': 3, 'col_3': 3.0},
]
DATA2 = [
{'col_1': '4', 'col_2': 4, 'col_3': 4.0},
{'col_1': '5', 'col_2': 5, 'col_3': 5.0},
]
DATA_DICT_OF_LISTS = {
'col_1': ['0', '1', '2', '3'],
'col_2': [0, 1, 2, 3],
'col_3': [0.0, 1.0, 2.0, 3.0],
}
DATA_312 = [
{'col_3': 0.0, 'col_1': '0', 'col_2': 0},
{'col_3': 1.0, 'col_1': '1', 'col_2': 1},
]
DATA_STR = [
{'col_1': 's0', 'col_2': 0, 'col_3': 0.0},
{'col_1': 's1', 'col_2': 1, 'col_3': 1.0},
{'col_1': 's2', 'col_2': 2, 'col_3': 2.0},
{'col_1': 's3', 'col_2': 3, 'col_3': 3.0},
]
@pytest.fixture(scope='session' )
def lowercase__ ( ):
'''simple docstring'''
return DATA_DICT_OF_LISTS
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : int ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = datasets.Dataset.from_dict(__snake_case )
UpperCAmelCase_ : Optional[Any] = str(tmp_path_factory.mktemp('data' ) / 'dataset.arrow' )
dataset.map(cache_file_name=__snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : List[Any] ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = str(tmp_path_factory.mktemp('data' ) / 'dataset.sqlite' )
    with contextlib.closing(sqlite3.connect(__snake_case ) ) as con:
UpperCAmelCase_ : List[Any] = con.cursor()
cur.execute('CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)' )
for item in DATA:
cur.execute('INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)' , tuple(item.values() ) )
con.commit()
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = str(tmp_path_factory.mktemp('data' ) / 'dataset.csv' )
with open(__snake_case , 'w' , newline='' ) as f:
UpperCAmelCase_ : Tuple = csv.DictWriter(__snake_case , fieldnames=['col_1', 'col_2', 'col_3'] )
writer.writeheader()
for item in DATA:
writer.writerow(__snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = str(tmp_path_factory.mktemp('data' ) / 'dataset2.csv' )
with open(__snake_case , 'w' , newline='' ) as f:
UpperCAmelCase_ : Optional[Any] = csv.DictWriter(__snake_case , fieldnames=['col_1', 'col_2', 'col_3'] )
writer.writeheader()
for item in DATA:
writer.writerow(__snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : str , __snake_case : Any ):
'''simple docstring'''
    import bz2
    UpperCAmelCase_ : int = tmp_path_factory.mktemp('data' ) / 'dataset.csv.bz2'
    with open(__snake_case , 'rb' ) as f:
        UpperCAmelCase_ : int = f.read()
    # data = bytes(FILE_CONTENT, "utf-8")
    with bz2.open(__snake_case , 'wb' ) as f:
f.write(__snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : List[str] , __snake_case : Tuple , __snake_case : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = tmp_path_factory.mktemp('data' ) / 'dataset.csv.zip'
with zipfile.ZipFile(__snake_case , 'w' ) as f:
f.write(__snake_case , arcname=os.path.basename(__snake_case ) )
f.write(__snake_case , arcname=os.path.basename(__snake_case ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : str , __snake_case : Optional[int] , __snake_case : Tuple ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = tmp_path_factory.mktemp('data' ) / 'dataset.csv.zip'
with zipfile.ZipFile(__snake_case , 'w' ) as f:
f.write(__snake_case , arcname=os.path.basename(csv_path.replace('.csv' , '.CSV' ) ) )
f.write(__snake_case , arcname=os.path.basename(csva_path.replace('.csv' , '.CSV' ) ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Tuple , __snake_case : int , __snake_case : str ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.csv.zip'
with zipfile.ZipFile(__snake_case , 'w' ) as f:
f.write(__snake_case , arcname=os.path.join('main_dir' , os.path.basename(__snake_case ) ) )
f.write(__snake_case , arcname=os.path.join('main_dir' , os.path.basename(__snake_case ) ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase_ : int = str(tmp_path_factory.mktemp('data' ) / 'dataset.parquet' )
UpperCAmelCase_ : Dict = pa.schema(
{
'col_1': pa.string(),
'col_2': pa.intaa(),
'col_3': pa.floataa(),
} )
with open(__snake_case , 'wb' ) as f:
UpperCAmelCase_ : List[Any] = pq.ParquetWriter(__snake_case , schema=__snake_case )
UpperCAmelCase_ : Any = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(__snake_case ) )] for k in DATA[0]} , schema=__snake_case )
writer.write_table(__snake_case )
writer.close()
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = str(tmp_path_factory.mktemp('data' ) / 'dataset.json' )
UpperCAmelCase_ : Optional[int] = {'data': DATA}
with open(__snake_case , 'w' ) as f:
json.dump(__snake_case , __snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : List[Any] ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = str(tmp_path_factory.mktemp('data' ) / 'dataset.json' )
UpperCAmelCase_ : Tuple = {'data': DATA_DICT_OF_LISTS}
with open(__snake_case , 'w' ) as f:
json.dump(__snake_case , __snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase_ : Dict = str(tmp_path_factory.mktemp('data' ) / 'dataset.jsonl' )
with open(__snake_case , 'w' ) as f:
for item in DATA:
f.write(json.dumps(__snake_case ) + '\n' )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : str ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = str(tmp_path_factory.mktemp('data' ) / 'dataset2.jsonl' )
with open(__snake_case , 'w' ) as f:
for item in DATA:
f.write(json.dumps(__snake_case ) + '\n' )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : int ):
'''simple docstring'''
UpperCAmelCase_ : int = str(tmp_path_factory.mktemp('data' ) / 'dataset_312.jsonl' )
with open(__snake_case , 'w' ) as f:
for item in DATA_312:
f.write(json.dumps(__snake_case ) + '\n' )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : str ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = str(tmp_path_factory.mktemp('data' ) / 'dataset-str.jsonl' )
with open(__snake_case , 'w' ) as f:
for item in DATA_STR:
f.write(json.dumps(__snake_case ) + '\n' )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Dict , __snake_case : Dict ):
'''simple docstring'''
import gzip
UpperCAmelCase_ : Union[str, Any] = str(tmp_path_factory.mktemp('data' ) / 'dataset.txt.gz' )
with open(__snake_case , 'rb' ) as orig_file:
with gzip.open(__snake_case , 'wb' ) as zipped_file:
zipped_file.writelines(__snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : int , __snake_case : Any ):
'''simple docstring'''
import gzip
UpperCAmelCase_ : Dict = str(tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.gz' )
with open(__snake_case , 'rb' ) as orig_file:
with gzip.open(__snake_case , 'wb' ) as zipped_file:
zipped_file.writelines(__snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Optional[Any] , __snake_case : Dict , __snake_case : Optional[int] ):
'''simple docstring'''
UpperCAmelCase_ : int = tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.zip'
with zipfile.ZipFile(__snake_case , 'w' ) as f:
f.write(__snake_case , arcname=os.path.basename(__snake_case ) )
f.write(__snake_case , arcname=os.path.basename(__snake_case ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Optional[Any] , __snake_case : str , __snake_case : Dict , __snake_case : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase_ : str = tmp_path_factory.mktemp('data' ) / 'dataset_nested.jsonl.zip'
with zipfile.ZipFile(__snake_case , 'w' ) as f:
f.write(__snake_case , arcname=os.path.join('nested' , os.path.basename(__snake_case ) ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Dict , __snake_case : Union[str, Any] , __snake_case : Tuple ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.jsonl.zip'
with zipfile.ZipFile(__snake_case , 'w' ) as f:
f.write(__snake_case , arcname=os.path.join('main_dir' , os.path.basename(__snake_case ) ) )
f.write(__snake_case , arcname=os.path.join('main_dir' , os.path.basename(__snake_case ) ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Tuple , __snake_case : str , __snake_case : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.tar'
with tarfile.TarFile(__snake_case , 'w' ) as f:
f.add(__snake_case , arcname=os.path.basename(__snake_case ) )
f.add(__snake_case , arcname=os.path.basename(__snake_case ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : str , __snake_case : Any , __snake_case : Any , __snake_case : List[Any] ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = tmp_path_factory.mktemp('data' ) / 'dataset_nested.jsonl.tar'
with tarfile.TarFile(__snake_case , 'w' ) as f:
f.add(__snake_case , arcname=os.path.join('nested' , os.path.basename(__snake_case ) ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : List[Any] ):
'''simple docstring'''
UpperCAmelCase_ : Any = ['0', '1', '2', '3']
UpperCAmelCase_ : Dict = str(tmp_path_factory.mktemp('data' ) / 'dataset.txt' )
with open(__snake_case , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : List[Any] ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = ['0', '1', '2', '3']
UpperCAmelCase_ : Optional[int] = str(tmp_path_factory.mktemp('data' ) / 'dataset2.txt' )
with open(__snake_case , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Tuple ):
'''simple docstring'''
UpperCAmelCase_ : Dict = ['0', '1', '2', '3']
UpperCAmelCase_ : List[str] = tmp_path_factory.mktemp('data' ) / 'dataset.abc'
with open(__snake_case , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Any , __snake_case : Union[str, Any] , __snake_case : List[Any] ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = tmp_path_factory.mktemp('data' ) / 'dataset.text.zip'
with zipfile.ZipFile(__snake_case , 'w' ) as f:
f.write(__snake_case , arcname=os.path.basename(__snake_case ) )
f.write(__snake_case , arcname=os.path.basename(__snake_case ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Dict , __snake_case : str , __snake_case : Any ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.text.zip'
with zipfile.ZipFile(__snake_case , 'w' ) as f:
f.write(__snake_case , arcname=os.path.join('main_dir' , os.path.basename(__snake_case ) ) )
f.write(__snake_case , arcname=os.path.join('main_dir' , os.path.basename(__snake_case ) ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Union[str, Any] , __snake_case : str , __snake_case : Optional[int] ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = tmp_path_factory.mktemp('data' ) / 'dataset.ext.zip'
with zipfile.ZipFile(__snake_case , 'w' ) as f:
f.write(__snake_case , arcname=os.path.basename('unsupported.ext' ) )
f.write(__snake_case , arcname=os.path.basename('unsupported_2.ext' ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Dict ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = '\n'.join(['First', 'Second\u2029with Unicode new line', 'Third'] )
UpperCAmelCase_ : Dict = str(tmp_path_factory.mktemp('data' ) / 'dataset_with_unicode_new_lines.txt' )
with open(__snake_case , 'w' , encoding='utf-8' ) as f:
f.write(__snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( ):
'''simple docstring'''
return os.path.join('tests' , 'features' , 'data' , 'test_image_rgb.jpg' )
@pytest.fixture(scope='session' )
def lowercase__ ( ):
'''simple docstring'''
return os.path.join('tests' , 'features' , 'data' , 'test_audio_44100.wav' )
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : str , __snake_case : List[str] ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = tmp_path_factory.mktemp('data' ) / 'dataset.img.zip'
with zipfile.ZipFile(__snake_case , 'w' ) as f:
f.write(__snake_case , arcname=os.path.basename(__snake_case ) )
f.write(__snake_case , arcname=os.path.basename(__snake_case ).replace('.jpg' , '2.jpg' ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Any ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = tmp_path_factory.mktemp('data_dir' )
(data_dir / "subdir").mkdir()
with open(data_dir / 'subdir' / 'train.txt' , 'w' ) as f:
f.write('foo\n' * 10 )
with open(data_dir / 'subdir' / 'test.txt' , 'w' ) as f:
f.write('bar\n' * 10 )
# hidden file
with open(data_dir / 'subdir' / '.test.txt' , 'w' ) as f:
f.write('bar\n' * 10 )
# hidden directory
(data_dir / ".subdir").mkdir()
with open(data_dir / '.subdir' / 'train.txt' , 'w' ) as f:
f.write('foo\n' * 10 )
with open(data_dir / '.subdir' / 'test.txt' , 'w' ) as f:
f.write('bar\n' * 10 )
return data_dir
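# Usage sketch (fixtures are resolved by function name, so the name below is
# an assumption): a test requests a generated file and loads it, e.g.
# def test_csv_loading(csv_path):
#     ds = datasets.load_dataset('csv', data_files=csv_path)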
| 29 | 1 |
# Imports
import numpy as np
class lowerCamelCase :
'''simple docstring'''
def __init__( self , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase=None ) -> Tuple:
self.set_matricies(red=_UpperCamelCase , green=_UpperCamelCase , blue=_UpperCamelCase , red_edge=_UpperCamelCase , nir=_UpperCamelCase )
def __UpperCAmelCase ( self , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase=None ) -> Dict:
if red is not None:
UpperCAmelCase_ : int = red
if green is not None:
UpperCAmelCase_ : Dict = green
if blue is not None:
UpperCAmelCase_ : str = blue
if red_edge is not None:
UpperCAmelCase_ : int = red_edge
if nir is not None:
UpperCAmelCase_ : Tuple = nir
return True
def __UpperCAmelCase ( self , _UpperCamelCase="" , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase=None ) -> List[Any]:
self.set_matricies(red=_UpperCamelCase , green=_UpperCamelCase , blue=_UpperCamelCase , red_edge=_UpperCamelCase , nir=_UpperCamelCase )
UpperCAmelCase_ : Optional[int] = {
'ARVI2': self.arvaa,
'CCCI': self.ccci,
'CVI': self.cvi,
'GLI': self.gli,
'NDVI': self.ndvi,
'BNDVI': self.bndvi,
'redEdgeNDVI': self.red_edge_ndvi,
'GNDVI': self.gndvi,
'GBNDVI': self.gbndvi,
'GRNDVI': self.grndvi,
'RBNDVI': self.rbndvi,
'PNDVI': self.pndvi,
'ATSAVI': self.atsavi,
'BWDRVI': self.bwdrvi,
'CIgreen': self.ci_green,
'CIrededge': self.ci_rededge,
'CI': self.ci,
'CTVI': self.ctvi,
'GDVI': self.gdvi,
'EVI': self.evi,
'GEMI': self.gemi,
'GOSAVI': self.gosavi,
'GSAVI': self.gsavi,
'Hue': self.hue,
'IVI': self.ivi,
'IPVI': self.ipvi,
'I': self.i,
'RVI': self.rvi,
'MRVI': self.mrvi,
'MSAVI': self.m_savi,
'NormG': self.norm_g,
'NormNIR': self.norm_nir,
'NormR': self.norm_r,
'NGRDI': self.ngrdi,
'RI': self.ri,
'S': self.s,
'IF': self._if,
'DVI': self.dvi,
'TVI': self.tvi,
'NDRE': self.ndre,
}
try:
return funcs[index]()
except KeyError:
print('Index not in the list!' )
return False
def __UpperCAmelCase ( self ) -> str:
return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red)))
def __UpperCAmelCase ( self ) -> Tuple:
return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
(self.nir - self.red) / (self.nir + self.red)
)
def __UpperCAmelCase ( self ) -> Union[str, Any]:
return self.nir * (self.red / (self.green**2))
def __UpperCAmelCase ( self ) -> Any:
return (2 * self.green - self.red - self.blue) / (
2 * self.green + self.red + self.blue
)
def __UpperCAmelCase ( self ) -> Union[str, Any]:
return (self.nir - self.red) / (self.nir + self.red)
def __UpperCAmelCase ( self ) -> Union[str, Any]:
return (self.nir - self.blue) / (self.nir + self.blue)
def __UpperCAmelCase ( self ) -> List[str]:
return (self.redEdge - self.red) / (self.redEdge + self.red)
def __UpperCAmelCase ( self ) -> Optional[int]:
return (self.nir - self.green) / (self.nir + self.green)
def __UpperCAmelCase ( self ) -> Any:
return (self.nir - (self.green + self.blue)) / (
self.nir + (self.green + self.blue)
)
def __UpperCAmelCase ( self ) -> Optional[Any]:
return (self.nir - (self.green + self.red)) / (
self.nir + (self.green + self.red)
)
def __UpperCAmelCase ( self ) -> Tuple:
return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))
def __UpperCAmelCase ( self ) -> Optional[int]:
return (self.nir - (self.green + self.red + self.blue)) / (
self.nir + (self.green + self.red + self.blue)
)
def __UpperCAmelCase ( self , _UpperCamelCase=0.08 , _UpperCamelCase=1.22 , _UpperCamelCase=0.03 ) -> Tuple:
return a * (
(self.nir - a * self.red - b)
/ (a * self.nir + self.red - a * b + x * (1 + a**2))
)
def __UpperCAmelCase ( self ) -> List[Any]:
return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)
def __UpperCAmelCase ( self ) -> Tuple:
return (self.nir / self.green) - 1
def __UpperCAmelCase ( self ) -> List[str]:
return (self.nir / self.redEdge) - 1
def __UpperCAmelCase ( self ) -> Any:
return (self.red - self.blue) / self.red
def __UpperCAmelCase ( self ) -> List[Any]:
        ndvi = self.ndvi()
return ((ndvi + 0.5) / (abs(ndvi + 0.5 ))) * (abs(ndvi + 0.5 ) ** (1 / 2))
def __UpperCAmelCase ( self ) -> Any:
return self.nir - self.green
def __UpperCAmelCase ( self ) -> Tuple:
return 2.5 * (
(self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
)
def __UpperCAmelCase ( self ) -> Tuple:
        n = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
self.nir + self.red + 0.5
)
return n * (1 - 0.25 * n) - (self.red - 0.1_25) / (1 - self.red)
def __UpperCAmelCase ( self , _UpperCamelCase=0.16 ) -> List[Any]:
return (self.nir - self.green) / (self.nir + self.green + y)
def __UpperCAmelCase ( self , _UpperCamelCase=0.5 ) -> Union[str, Any]:
return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)
def __UpperCAmelCase ( self ) -> Any:
return np.arctan(
((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue) )
def __UpperCAmelCase ( self , _UpperCamelCase=None , _UpperCamelCase=None ) -> Dict:
return (self.nir - b) / (a * self.red)
def __UpperCAmelCase ( self ) -> Any:
return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)
def __UpperCAmelCase ( self ) -> List[str]:
return (self.red + self.green + self.blue) / 30.5
def __UpperCAmelCase ( self ) -> Optional[int]:
return self.nir / self.red
def __UpperCAmelCase ( self ) -> Tuple:
return (self.rvi() - 1) / (self.rvi() + 1)
def __UpperCAmelCase ( self ) -> str:
return (
(2 * self.nir + 1)
- ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
) / 2
def __UpperCAmelCase ( self ) -> Optional[Any]:
return self.green / (self.nir + self.red + self.green)
def __UpperCAmelCase ( self ) -> List[Any]:
return self.nir / (self.nir + self.red + self.green)
def __UpperCAmelCase ( self ) -> str:
return self.red / (self.nir + self.red + self.green)
def __UpperCAmelCase ( self ) -> List[Any]:
return (self.green - self.red) / (self.green + self.red)
def __UpperCAmelCase ( self ) -> List[Any]:
return (self.red - self.green) / (self.red + self.green)
def __UpperCAmelCase ( self ) -> Union[str, Any]:
        max_value = np.max([np.max(self.red ), np.max(self.green ), np.max(self.blue )] )
        min_value = np.min([np.min(self.red ), np.min(self.green ), np.min(self.blue )] )
return (max_value - min_value) / max_value
def __UpperCAmelCase ( self ) -> List[str]:
return (2 * self.red - self.green - self.blue) / (self.green - self.blue)
def __UpperCAmelCase ( self ) -> str:
return self.nir / self.red
def __UpperCAmelCase ( self ) -> List[str]:
return (self.ndvi() + 0.5) ** (1 / 2)
def __UpperCAmelCase ( self ) -> str:
return (self.nir - self.redEdge) / (self.nir + self.redEdge)
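# Standalone sketch of the NDVI formula used above, on toy reflectance bands
# (the values are illustrative only):
red_band = np.array([0.2, 0.3] )
nir_band = np.array([0.6, 0.8] )
print((nir_band - red_band) / (nir_band + red_band) )  # roughly [0.5, 0.4545]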
| 29 |
from __future__ import annotations
def get_valid_pos( position : tuple[int, int] , n : int ) -> list[tuple[int, int]]:
    '''simple docstring'''
    y , x = position
    positions = [
        (y + 1, x + 2),
        (y - 1, x + 2),
        (y + 1, x - 2),
        (y - 1, x - 2),
        (y + 2, x + 1),
        (y + 2, x - 1),
        (y - 2, x + 1),
        (y - 2, x - 1),
    ]
    permissible_positions = []
    for position in positions:
        y_test , x_test = position
        if 0 <= y_test < n and 0 <= x_test < n:
            permissible_positions.append(position )
    return permissible_positions
def is_complete( board : list[list[int]] ) -> bool:
    '''simple docstring'''
    return not any(elem == 0 for row in board for elem in row )
def open_knight_tour_helper( board : list[list[int]] , pos : tuple[int, int] , curr : int ) -> bool:
    '''simple docstring'''
    if is_complete(board ):
        return True
    for position in get_valid_pos(pos , len(board ) ):
        y , x = position
        if board[y][x] == 0:
            board[y][x] = curr + 1
            if open_knight_tour_helper(board , position , curr + 1 ):
                return True
            board[y][x] = 0
    return False
def open_knight_tour( n : int ) -> list[list[int]]:
    '''simple docstring'''
    board = [[0 for i in range(n )] for j in range(n )]
    for i in range(n ):
        for j in range(n ):
            board[i][j] = 1
            if open_knight_tour_helper(board , (i, j) , 1 ):
                return board
            board[i][j] = 0
    msg = F"Open Knight Tour cannot be performed on a board of size {n}"
    raise ValueError(msg )
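# Example (sketch): a 1x1 board is trivially a complete open tour.
print(open_knight_tour(1 ))  # [[1]]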
if __name__ == "__main__":
import doctest
doctest.testmod()
| 29 | 1 |
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer
def encode_line( tokenizer , line , max_length , padding_side , pad_to_max_length=True , return_tensors="pt" ):
    '''simple docstring'''
    extra_kw = {'add_prefix_space': True} if isinstance(tokenizer , BartTokenizer ) and not line.startswith(' ' ) else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line] , max_length=max_length , padding='max_length' if pad_to_max_length else None , truncation=True , return_tensors=return_tensors , add_special_tokens=True , **extra_kw , )
def trim_batch( input_ids , pad_token_id , attention_mask=None , ):
    '''simple docstring'''
    keep_column_mask = input_ids.ne(pad_token_id ).any(dim=0 )
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class lowerCamelCase (_snake_case ):
'''simple docstring'''
def __init__( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase="train" , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase="" , ) -> Any:
super().__init__()
UpperCAmelCase_ : Optional[Any] = Path(_UpperCamelCase ).joinpath(type_path + '.source' )
UpperCAmelCase_ : Dict = Path(_UpperCamelCase ).joinpath(type_path + '.target' )
UpperCAmelCase_ : Optional[Any] = self.get_char_lens(self.src_file )
UpperCAmelCase_ : Dict = max_source_length
UpperCAmelCase_ : List[Any] = max_target_length
assert min(self.src_lens ) > 0, f"found empty line in {self.src_file}"
UpperCAmelCase_ : List[Any] = tokenizer
UpperCAmelCase_ : Tuple = prefix
if n_obs is not None:
UpperCAmelCase_ : int = self.src_lens[:n_obs]
UpperCAmelCase_ : Optional[int] = src_lang
UpperCAmelCase_ : List[str] = tgt_lang
def __len__( self ) -> Dict:
return len(self.src_lens )
def __getitem__( self , _UpperCamelCase ) -> Dict[str, torch.Tensor]:
UpperCAmelCase_ : Union[str, Any] = index + 1 # linecache starts at 1
UpperCAmelCase_ : Any = self.prefix + linecache.getline(str(self.src_file ) , _UpperCamelCase ).rstrip('\n' )
UpperCAmelCase_ : Optional[int] = linecache.getline(str(self.tgt_file ) , _UpperCamelCase ).rstrip('\n' )
assert source_line, f"empty source line for index {index}"
assert tgt_line, f"empty tgt line for index {index}"
# Need to add eos token manually for T5
if isinstance(self.tokenizer , _UpperCamelCase ):
source_line += self.tokenizer.eos_token
tgt_line += self.tokenizer.eos_token
# Pad source and target to the right
UpperCAmelCase_ : str = (
self.tokenizer.question_encoder if isinstance(self.tokenizer , _UpperCamelCase ) else self.tokenizer
)
UpperCAmelCase_ : Optional[Any] = self.tokenizer.generator if isinstance(self.tokenizer , _UpperCamelCase ) else self.tokenizer
UpperCAmelCase_ : str = encode_line(_UpperCamelCase , _UpperCamelCase , self.max_source_length , 'right' )
UpperCAmelCase_ : List[Any] = encode_line(_UpperCamelCase , _UpperCamelCase , self.max_target_length , 'right' )
UpperCAmelCase_ : Optional[Any] = source_inputs['input_ids'].squeeze()
UpperCAmelCase_ : Any = target_inputs['input_ids'].squeeze()
UpperCAmelCase_ : int = source_inputs['attention_mask'].squeeze()
return {
"input_ids": source_ids,
"attention_mask": src_mask,
"decoder_input_ids": target_ids,
}
@staticmethod
def __UpperCAmelCase ( _UpperCamelCase ) -> Optional[int]:
return [len(_UpperCamelCase ) for x in Path(_UpperCamelCase ).open().readlines()]
def __UpperCAmelCase ( self , _UpperCamelCase ) -> Dict[str, torch.Tensor]:
UpperCAmelCase_ : List[str] = torch.stack([x['input_ids'] for x in batch] )
UpperCAmelCase_ : int = torch.stack([x['attention_mask'] for x in batch] )
UpperCAmelCase_ : Tuple = torch.stack([x['decoder_input_ids'] for x in batch] )
UpperCAmelCase_ : Tuple = (
self.tokenizer.generator.pad_token_id
if isinstance(self.tokenizer , _UpperCamelCase )
else self.tokenizer.pad_token_id
)
UpperCAmelCase_ : str = (
self.tokenizer.question_encoder.pad_token_id
if isinstance(self.tokenizer , _UpperCamelCase )
else self.tokenizer.pad_token_id
)
UpperCAmelCase_ : Union[str, Any] = trim_batch(_UpperCamelCase , _UpperCamelCase )
UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = trim_batch(_UpperCamelCase , _UpperCamelCase , attention_mask=_UpperCamelCase )
UpperCAmelCase_ : List[Any] = {
'input_ids': source_ids,
'attention_mask': source_mask,
'decoder_input_ids': y,
}
return batch
__UpperCAmelCase = getLogger(__name__)
def lowercase__ ( __snake_case : List[List] ):
'''simple docstring'''
return list(itertools.chain.from_iterable(__snake_case ) )
def lowercase__ ( __snake_case : str ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = get_git_info()
save_json(__snake_case , os.path.join(__snake_case , 'git_log.json' ) )
def lowercase__ ( __snake_case : Optional[int] , __snake_case : int , __snake_case : List[Any]=4 , **__snake_case : Optional[Any] ):
'''simple docstring'''
with open(__snake_case , 'w' ) as f:
json.dump(__snake_case , __snake_case , indent=__snake_case , **__snake_case )
def lowercase__ ( __snake_case : Optional[Any] ):
'''simple docstring'''
with open(__snake_case ) as f:
return json.load(__snake_case )
def lowercase__ ( ):
'''simple docstring'''
    repo = git.Repo(search_parent_directories=True )
UpperCAmelCase_ : Tuple = {
'repo_id': str(__snake_case ),
'repo_sha': str(repo.head.object.hexsha ),
'repo_branch': str(repo.active_branch ),
'hostname': str(socket.gethostname() ),
}
return repo_infos
def lowercase__ ( __snake_case : Callable , __snake_case : Iterable ):
'''simple docstring'''
return list(map(__snake_case , __snake_case ) )
def lowercase__ ( __snake_case : Tuple , __snake_case : Any ):
'''simple docstring'''
with open(__snake_case , 'wb' ) as f:
return pickle.dump(__snake_case , __snake_case )
def lowercase__ ( __snake_case : Tuple ):
'''simple docstring'''
def remove_articles(__snake_case : Optional[int] ):
return re.sub(R'\b(a|an|the)\b' , ' ' , __snake_case )
def white_space_fix(__snake_case : Any ):
return " ".join(text.split() )
def remove_punc(__snake_case : int ):
UpperCAmelCase_ : Optional[int] = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(__snake_case : int ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(__snake_case ) ) ) )
def lowercase__ ( __snake_case : Union[str, Any] , __snake_case : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase_ : Any = normalize_answer(__snake_case ).split()
UpperCAmelCase_ : str = normalize_answer(__snake_case ).split()
UpperCAmelCase_ : Tuple = Counter(__snake_case ) & Counter(__snake_case )
UpperCAmelCase_ : Optional[Any] = sum(common.values() )
if num_same == 0:
return 0
UpperCAmelCase_ : List[Any] = 1.0 * num_same / len(__snake_case )
UpperCAmelCase_ : Tuple = 1.0 * num_same / len(__snake_case )
UpperCAmelCase_ : List[Any] = (2 * precision * recall) / (precision + recall)
return fa
def lowercase__ ( __snake_case : Optional[Any] , __snake_case : str ):
'''simple docstring'''
return normalize_answer(__snake_case ) == normalize_answer(__snake_case )
def lowercase__ ( __snake_case : List[str] , __snake_case : List[str] ):
'''simple docstring'''
assert len(__snake_case ) == len(__snake_case )
UpperCAmelCase_ : Any = 0
for hypo, pred in zip(__snake_case , __snake_case ):
em += exact_match_score(__snake_case , __snake_case )
if len(__snake_case ) > 0:
em /= len(__snake_case )
return {"em": em}
def lowercase__ ( __snake_case : List[Any] ):
'''simple docstring'''
return model_prefix.startswith('rag' )
def lowercase__ ( __snake_case : Union[str, Any] , __snake_case : List[str] , __snake_case : Tuple ):
'''simple docstring'''
UpperCAmelCase_ : Any = {p: p for p in extra_params}
# T5 models don't have `dropout` param, they have `dropout_rate` instead
UpperCAmelCase_ : str = 'dropout_rate'
for p in extra_params:
if getattr(__snake_case , __snake_case , __snake_case ):
if not hasattr(__snake_case , __snake_case ) and not hasattr(__snake_case , equivalent_param[p] ):
logger.info('config doesn\'t have a `{}` attribute'.format(__snake_case ) )
delattr(__snake_case , __snake_case )
continue
UpperCAmelCase_ : Optional[Any] = p if hasattr(__snake_case , __snake_case ) else equivalent_param[p]
setattr(__snake_case , __snake_case , getattr(__snake_case , __snake_case ) )
delattr(__snake_case , __snake_case )
return hparams, config
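# Standalone sketch of the token-overlap F1 computed above (the helper name
# `f1_sketch` is hypothetical; the real helpers normalize punctuation first):
def f1_sketch(pred: str , gold: str ) -> float:
    pred_tokens , gold_tokens = pred.split(), gold.split()
    common = sum((Counter(pred_tokens ) & Counter(gold_tokens )).values() )
    if common == 0:
        return 0.0
    precision = common / len(pred_tokens )
    recall = common / len(gold_tokens )
    return 2 * precision * recall / (precision + recall)
print(f1_sketch('a cat sat' , 'the cat sat' ))  # ~0.667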
| 29 |
def partition( m : int ) -> int:
    '''simple docstring'''
    memo : list[list[int]] = [[0 for _ in range(m )] for _ in range(m + 1 )]
    for i in range(m + 1 ):
        memo[i][0] = 1
    for n in range(m + 1 ):
        for k in range(1 , m ):
            memo[n][k] += memo[n][k - 1]
            if n - k > 0:
                memo[n][k] += memo[n - k - 1][k]
    return memo[m][m - 1]
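# Example (sketch): partition(m) counts the integer partitions of m,
# e.g. p(3) = 3 and p(7) = 15.
print(partition(3 ), partition(7 ))  # 3 15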
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
try:
            n = int(input('Enter a number: ').strip())
            print(partition(n))
except ValueError:
print('Please enter a number.')
else:
try:
            n = int(sys.argv[1])
            print(partition(n))
except ValueError:
print('Please pass a number.')
| 29 | 1 |
def split( string : str , separator : str = " " ) -> list:
    '''simple docstring'''
    split_words = []
    last_index = 0
    for index, char in enumerate(string ):
        if char == separator:
            split_words.append(string[last_index:index] )
            last_index = index + 1
        elif index + 1 == len(string ):
            split_words.append(string[last_index : index + 1] )
    return split_words
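# Example (sketch): split on a custom separator.
print(split('apple#banana#cherry#orange' , separator='#' ))
# -> ['apple', 'banana', 'cherry', 'orange']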
if __name__ == "__main__":
from doctest import testmod
testmod()
| 29 |
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
__UpperCAmelCase = logging.get_logger(__name__)
@add_end_docstrings(_snake_case )
class lowerCamelCase (_snake_case ):
'''simple docstring'''
def __init__( self , *_UpperCamelCase , **_UpperCamelCase ) -> int:
super().__init__(*_UpperCamelCase , **_UpperCamelCase )
self.check_model_type(_UpperCamelCase )
    def _sanitize_parameters( self , top_k=None , padding=None , truncation=None , **kwargs ):
        preprocess_params , postprocess_params = {}, {}
        if padding is not None:
            preprocess_params['padding'] = padding
        if truncation is not None:
            preprocess_params['truncation'] = truncation
        if top_k is not None:
            postprocess_params['top_k'] = top_k
        return preprocess_params, {}, postprocess_params
    def __call__( self , image , question = None , **kwargs ):
        if isinstance(image , (Image.Image, str) ) and isinstance(question , str ):
            inputs = {'image': image, 'question': question}
        else:
            inputs = image
        results = super().__call__(inputs , **kwargs )
        return results
    def preprocess( self , inputs , padding=False , truncation=False ):
        image = load_image(inputs['image'] )
        model_inputs = self.tokenizer(
            inputs['question'] , return_tensors=self.framework , padding=padding , truncation=truncation )
        image_features = self.image_processor(images=image , return_tensors=self.framework )
        model_inputs.update(image_features )
        return model_inputs
    def _forward( self , model_inputs ):
        model_outputs = self.model(**model_inputs )
        return model_outputs
    def postprocess( self , model_outputs , top_k=5 ):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels
        if self.framework == "pt":
            probs = model_outputs.logits.sigmoid()[0]
            scores , ids = probs.topk(top_k )
        else:
            raise ValueError(f"Unsupported framework: {self.framework}" )
        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "answer": self.model.config.id2label[_id]} for score, _id in zip(scores , ids )]
| 29 | 1 |
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class lowerCamelCase (_snake_case ):
'''simple docstring'''
def __init__( self , _UpperCamelCase , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = False , _UpperCamelCase = False , _UpperCamelCase = None , _UpperCamelCase = None , **_UpperCamelCase , ) -> Optional[int]:
super().__init__(
_UpperCamelCase , split=_UpperCamelCase , features=_UpperCamelCase , cache_dir=_UpperCamelCase , keep_in_memory=_UpperCamelCase , streaming=_UpperCamelCase , num_proc=_UpperCamelCase , **_UpperCamelCase , )
UpperCAmelCase_ : Any = field
UpperCAmelCase_ : Tuple = path_or_paths if isinstance(_UpperCamelCase , _UpperCamelCase ) else {self.split: path_or_paths}
UpperCAmelCase_ : int = Json(
cache_dir=_UpperCamelCase , data_files=_UpperCamelCase , features=_UpperCamelCase , field=_UpperCamelCase , **_UpperCamelCase , )
def __UpperCAmelCase ( self ) -> List[Any]:
# Build iterable dataset
if self.streaming:
UpperCAmelCase_ : Optional[Any] = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
UpperCAmelCase_ : Optional[int] = None
UpperCAmelCase_ : List[Any] = None
UpperCAmelCase_ : Any = None
UpperCAmelCase_ : Any = None
self.builder.download_and_prepare(
download_config=_UpperCamelCase , download_mode=_UpperCamelCase , verification_mode=_UpperCamelCase , base_path=_UpperCamelCase , num_proc=self.num_proc , )
UpperCAmelCase_ : List[str] = self.builder.as_dataset(
split=self.split , verification_mode=_UpperCamelCase , in_memory=self.keep_in_memory )
return dataset
class JsonDatasetWriter:
    '''simple docstring'''
    def __init__(self, dataset, path_or_buf, batch_size=None, num_proc=None, **to_json_kwargs) -> None:
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"num_proc {num_proc} must be an integer > 0.")
        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.encoding = 'utf-8'
        self.to_json_kwargs = to_json_kwargs

    def write(self) -> int:
        _ = self.to_json_kwargs.pop('path_or_buf', None)
        orient = self.to_json_kwargs.pop('orient', 'records')
        lines = self.to_json_kwargs.pop('lines', True if orient == 'records' else False)
        index = self.to_json_kwargs.pop('index', False if orient in ['split', 'table'] else True)
        compression = self.to_json_kwargs.pop('compression', None)
        if compression not in [None, "infer", "gzip", "bz2", "xz"]:
            raise NotImplementedError(f"`datasets` currently does not support {compression} compression")
        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with fsspec.open(self.path_or_buf, 'wb', compression=compression) as buffer:
                written = self._write(file_obj=buffer, orient=orient, lines=lines, index=index, **self.to_json_kwargs)
        else:
            if compression:
                raise NotImplementedError(
                    f"The compression parameter is not supported when writing to a buffer, but compression={compression}"
                    ' was passed. Please provide a local path instead.')
            written = self._write(
                file_obj=self.path_or_buf, orient=orient, lines=lines, index=index, **self.to_json_kwargs)
        return written

    def _batch_json(self, args):
        offset, orient, lines, index, to_json_kwargs = args
        batch = query_table(
            table=self.dataset.data, key=slice(offset, offset + self.batch_size), indices=self.dataset._indices, )
        json_str = batch.to_pandas().to_json(
            path_or_buf=None, orient=orient, lines=lines, index=index, **to_json_kwargs)
        if not json_str.endswith('\n'):
            json_str += "\n"
        return json_str.encode(self.encoding)

    def _write(self, file_obj, orient, lines, index, **to_json_kwargs) -> int:
        written = 0
        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size), unit='ba', disable=not logging.is_progress_bar_enabled(), desc='Creating json from Arrow format', ):
                json_str = self._batch_json((offset, orient, lines, index, to_json_kwargs))
                written += file_obj.write(json_str)
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for json_str in logging.tqdm(
                    pool.imap(
                        self._batch_json, [(offset, orient, lines, index, to_json_kwargs) for offset in range(0, num_rows, batch_size)], ), total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size, unit='ba', disable=not logging.is_progress_bar_enabled(), desc='Creating json from Arrow format', ):
                    written += file_obj.write(json_str)
        return written
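# Hedged sketch: the progress-bar total in _write above rounds the batch count up.
# A standalone check of that arithmetic with illustrative values:
def _batch_count_demo():
    num_rows, batch_size = 10, 4
    total = (num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size
    assert total == 3  # batches cover rows [0:4], [4:8], [8:10]
    return total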
| 29 |
import os
# Precomputes a list of the 100 first triangular numbers
TRIANGULAR_NUMBERS = [int(0.5 * n * (n + 1)) for n in range(1, 101)]
def solution():
    '''simple docstring'''
    script_directory = os.path.dirname(os.path.realpath(__file__))
    words_file_path = os.path.join(script_directory, 'words.txt')
    words = ''
    with open(words_file_path) as f:
        words = f.readline()
    words = [word.strip('"') for word in words.strip('\r\n').split(',')]
    words = [
        word
        for word in [sum(ord(x) - 64 for x in word) for word in words]
        if word in TRIANGULAR_NUMBERS
    ]
    return len(words)
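# Hedged alternative to the precomputed TRIANGULAR_NUMBERS list above: n is a
# triangular number iff 8*n + 1 is a perfect square, so membership can be tested
# directly, with no upper bound on the word value.
def _is_triangular(number: int) -> bool:
    import math

    root = math.isqrt(8 * number + 1)
    return root * root == 8 * number + 1
# e.g. _is_triangular(55) is True (the 10th triangular number), _is_triangular(56) is False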
if __name__ == "__main__":
print(solution())
| 29 | 1 |
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class DistilBertTokenizationTest(BertTokenizationTest):
    '''simple docstring'''
    tokenizer_class = DistilBertTokenizer
    rust_tokenizer_class = DistilBertTokenizerFast
    test_rust_tokenizer = True

    @slow
    def test_sequence_builders(self):
        tokenizer = DistilBertTokenizer.from_pretrained('distilbert-base-uncased')
        text = tokenizer.encode('sequence builders', add_special_tokens=False)
        text_2 = tokenizer.encode('multi-sequence build', add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [
            tokenizer.sep_token_id
        ]
| 29 |
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
_has_s3fs = importlib.util.find_spec('s3fs') is not None
if _has_s3fs:
    from .s3filesystem import S3FileSystem  # noqa: F401
COMPRESSION_FILESYSTEMS = [
    compression.Bz2FileSystem,
    compression.GzipFileSystem,
    compression.Lz4FileSystem,
    compression.XzFileSystem,
    compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
warnings.warn(F'A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.')
fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def extract_path_from_uri(dataset_path: str) -> str:
'''simple docstring'''
if "://" in dataset_path:
UpperCAmelCase_ : int = dataset_path.split('://' )[1]
return dataset_path
def is_remote_filesystem(fs: fsspec.AbstractFileSystem) -> bool:
'''simple docstring'''
if fs is not None and fs.protocol != "file":
return True
else:
return False
def rename(fs: fsspec.AbstractFileSystem, src: str, dst: str):
    '''simple docstring'''
    is_local = not is_remote_filesystem(fs)
    if is_local:
        # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
        shutil.move(fs._strip_protocol(src), fs._strip_protocol(dst))
    else:
        fs.mv(src, dst, recursive=True)
def _reset_fsspec_lock() -> None:
    '''simple docstring'''
    if hasattr(fsspec.asyn, 'reset_lock'):
        # for future fsspec>2022.05.0
        fsspec.asyn.reset_lock()
    else:
        fsspec.asyn.iothread[0] = None
        fsspec.asyn.loop[0] = None
        fsspec.asyn.lock = threading.Lock()
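# Hedged usage sketch for the helpers above (the URI and path are illustrative):
def _fsspec_utils_demo():
    assert extract_path_from_uri('s3://my-bucket/my-dataset') == 'my-bucket/my-dataset'
    assert extract_path_from_uri('/local/path/dataset') == '/local/path/dataset'
    local_fs = fsspec.filesystem('file')
    assert not is_remote_filesystem(local_fs)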
| 29 | 1 |
def euclidean_gcd(a: int, b: int) -> int:
    '''simple docstring'''
    while b:
        a, b = b, a % b
    return a


def euclidean_gcd_recursive(a: int, b: int) -> int:
    '''simple docstring'''
    return a if b == 0 else euclidean_gcd_recursive(b, a % b)
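# Hedged sanity check for the two implementations above, compared against math.gcd:
def _gcd_demo():
    import math

    for a, b in [(3, 5), (54, 24), (0, 7)]:
        assert euclidean_gcd(a, b) == euclidean_gcd_recursive(a, b) == math.gcd(a, b)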
def main():
'''simple docstring'''
print(F"euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}" )
print(F"euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}" )
print(F"euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}" )
print(F"euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}" )
print(F"euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}" )
print(F"euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}" )
print(F"euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}" )
print(F"euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}" )
print(F"euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}" )
print(F"euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}" )
if __name__ == "__main__":
main()
| 29 |
def cocktail_shaker_sort(unsorted: list) -> list:
    '''simple docstring'''
    for i in range(len(unsorted) - 1, 0, -1):
        swapped = False
        for j in range(i, 0, -1):
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j], unsorted[j - 1] = unsorted[j - 1], unsorted[j]
                swapped = True
        for j in range(i):
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j], unsorted[j + 1] = unsorted[j + 1], unsorted[j]
                swapped = True
        if not swapped:
            break
    return unsorted
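# Hedged sanity check: the implementation above sorts in place and also returns the
# list, so it should agree with sorted() on a fresh copy of the input.
def _cocktail_shaker_demo():
    data = [4, 5, 2, 1, 2]
    assert cocktail_shaker_sort(list(data)) == sorted(data)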
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
print(F'{cocktail_shaker_sort(unsorted) = }')
| 29 | 1 |
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
logging.basicConfig(level=logging.INFO, format='%(message)s')
def column_reshape(input_array: np.ndarray) -> np.ndarray:
    '''simple docstring'''
    return input_array.reshape((input_array.size, 1))
def covariance_within_classes(features: np.ndarray, labels: np.ndarray, classes: int) -> np.ndarray:
    '''simple docstring'''
    covariance_sum = np.nan
    for i in range(classes):
        data = features[:, labels == i]
        data_mean = data.mean(1)
        # Centralize the data of class i
        centered_data = data - column_reshape(data_mean)
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += np.dot(centered_data, centered_data.T)
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = np.dot(centered_data, centered_data.T)
    return covariance_sum / features.shape[1]
def covariance_between_classes(features: np.ndarray, labels: np.ndarray, classes: int) -> np.ndarray:
    '''simple docstring'''
    general_data_mean = features.mean(1)
    covariance_sum = np.nan
    for i in range(classes):
        data = features[:, labels == i]
        device_data = data.shape[1]
        data_mean = data.mean(1)
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += device_data * np.dot(
                column_reshape(data_mean) - column_reshape(general_data_mean), (column_reshape(data_mean) - column_reshape(general_data_mean)).T, )
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = device_data * np.dot(
                column_reshape(data_mean) - column_reshape(general_data_mean), (column_reshape(data_mean) - column_reshape(general_data_mean)).T, )
    return covariance_sum / features.shape[1]
def principal_component_analysis(features: np.ndarray, dimensions: int) -> np.ndarray:
    '''simple docstring'''
    if features.any():
        data_mean = features.mean(1)
        # Center the dataset
        centered_data = features - np.reshape(data_mean, (data_mean.size, 1))
        covariance_matrix = np.dot(centered_data, centered_data.T) / features.shape[1]
        eigenvalues, eigenvectors = np.linalg.eigh(covariance_matrix)
        # Take all the columns in the reverse order (-1), and then takes only the first
        filtered_eigenvectors = eigenvectors[:, ::-1][:, 0:dimensions]
        # Project the database on the new space
        projected_data = np.dot(filtered_eigenvectors.T, features)
        logging.info('Principal Component Analysis computed')
        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR, format='%(message)s', force=True)
        logging.error('Dataset empty')
        raise AssertionError
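# Hedged usage sketch for principal_component_analysis: project three 2-D samples
# onto one principal axis. The input values are illustrative.
def _pca_demo():
    features = np.array([[0.0, 1.0, 2.0], [0.0, 1.0, 2.0]])  # 2 features x 3 samples
    projected = principal_component_analysis(features, dimensions=1)
    assert projected.shape == (1, 3)
    return projected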
def linear_discriminant_analysis(features: np.ndarray, labels: np.ndarray, classes: int, dimensions: int) -> np.ndarray:
    '''simple docstring'''
    assert classes > dimensions
    # Check if features have been already loaded
    if features.any():
        eigenvalues, eigenvectors = eigh(
            covariance_between_classes(features, labels, classes), covariance_within_classes(features, labels, classes), )
        filtered_eigenvectors = eigenvectors[:, ::-1][:, :dimensions]
        svd_matrix, _, _ = np.linalg.svd(filtered_eigenvectors)
        filtered_svd_matrix = svd_matrix[:, 0:dimensions]
        projected_data = np.dot(filtered_svd_matrix.T, features)
        logging.info('Linear Discriminant Analysis computed')
        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR, format='%(message)s', force=True)
        logging.error('Dataset empty')
        raise AssertionError
def test_linear_discriminant_analysis() -> None:
    '''simple docstring'''
    features = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]])
    labels = np.array([0, 0, 0, 1, 1])
    classes = 2
    dimensions = 2
    # Assert that the function raises an AssertionError if dimensions > classes
    with pytest.raises(AssertionError) as error_info:
        projected_data = linear_discriminant_analysis(
            features, labels, classes, dimensions)
        if isinstance(projected_data, np.ndarray):
            raise AssertionError(
                'Did not raise AssertionError for dimensions > classes')
    assert error_info.type is AssertionError
def test_principal_component_analysis() -> None:
    '''simple docstring'''
    features = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    dimensions = 2
    expected_output = np.array([[6.92820323, 8.66025404, 10.39230485], [3.0, 3.0, 3.0]])
    with pytest.raises(AssertionError) as error_info:
        output = principal_component_analysis(features, dimensions)
        if not np.allclose(expected_output, output):
            raise AssertionError
    assert error_info.type is AssertionError
if __name__ == "__main__":
import doctest
doctest.testmod()
| 29 |
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = '\n    Examples:\n        ```py\n        >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline\n        >>> from diffusers.utils import load_image\n        >>> import torch\n\n        >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n        ...     "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n        ... )\n        >>> pipe_prior.to("cuda")\n\n        >>> prompt = "A red cartoon frog, 4k"\n        >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)\n\n        >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(\n        ...     "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16\n        ... )\n        >>> pipe.to("cuda")\n\n        >>> init_image = load_image(\n        ...     "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n        ...     "/kandinsky/frog.png"\n        ... )\n\n        >>> image = pipe(\n        ...     image=init_image,\n        ...     image_embeds=image_emb,\n        ...     negative_image_embeds=zero_image_emb,\n        ...     height=768,\n        ...     width=768,\n        ...     num_inference_steps=100,\n        ...     strength=0.2,\n        ... ).images\n\n        >>> image[0].save("red_frog.png")\n        ```\n'
def downscale_height_and_width(height, width, scale_factor=8):
    '''simple docstring'''
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
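# Hedged check of the rounding behavior above: with scale_factor=8 the function
# computes ceil(size / 64) * 8, i.e. the latent-space resolution for a given pixel size.
def _downscale_demo():
    assert downscale_height_and_width(768, 768) == (96, 96)
    assert downscale_height_and_width(767, 767) == (96, 96)  # rounds up
    assert downscale_height_and_width(769, 769) == (104, 104)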
def prepare_image(pil_image, w=512, h=512):
    '''simple docstring'''
    pil_image = pil_image.resize((w, h), resample=Image.BICUBIC, reducing_gap=1)
    arr = np.array(pil_image.convert('RGB'))
    arr = arr.astype(np.float32) / 127.5 - 1
    arr = np.transpose(arr, [2, 0, 1])
    image = torch.from_numpy(arr).unsqueeze(0)
    return image
class lowerCamelCase (_snake_case ):
'''simple docstring'''
def __init__( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , ) -> Union[str, Any]:
super().__init__()
self.register_modules(
unet=_UpperCamelCase , scheduler=_UpperCamelCase , movq=_UpperCamelCase , )
UpperCAmelCase_ : Tuple = 2 ** (len(self.movq.config.block_out_channels ) - 1)
    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]
        return timesteps, num_inference_steps - t_start
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=None ) -> Tuple:
if not isinstance(_UpperCamelCase , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(_UpperCamelCase )}" )
UpperCAmelCase_ : List[str] = image.to(device=_UpperCamelCase , dtype=_UpperCamelCase )
UpperCAmelCase_ : List[str] = batch_size * num_images_per_prompt
if image.shape[1] == 4:
UpperCAmelCase_ : List[str] = image
else:
if isinstance(_UpperCamelCase , _UpperCamelCase ) and len(_UpperCamelCase ) != batch_size:
raise ValueError(
f"You have passed a list of generators of length {len(_UpperCamelCase )}, but requested an effective batch"
f" size of {batch_size}. Make sure the batch size matches the length of the generators." )
elif isinstance(_UpperCamelCase , _UpperCamelCase ):
UpperCAmelCase_ : Any = [
self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(_UpperCamelCase )
]
UpperCAmelCase_ : Tuple = torch.cat(_UpperCamelCase , dim=0 )
else:
UpperCAmelCase_ : Union[str, Any] = self.movq.encode(_UpperCamelCase ).latent_dist.sample(_UpperCamelCase )
UpperCAmelCase_ : int = self.movq.config.scaling_factor * init_latents
UpperCAmelCase_ : Optional[int] = torch.cat([init_latents] , dim=0 )
UpperCAmelCase_ : Tuple = init_latents.shape
UpperCAmelCase_ : List[Any] = randn_tensor(_UpperCamelCase , generator=_UpperCamelCase , device=_UpperCamelCase , dtype=_UpperCamelCase )
# get latents
UpperCAmelCase_ : str = self.scheduler.add_noise(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
UpperCAmelCase_ : Union[str, Any] = init_latents
return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError('Please install accelerate via `pip install accelerate`')
        device = torch.device(f"cuda:{gpu_id}")
        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version('>=', '0.17.0.dev0'):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.')
        device = torch.device(f"cuda:{gpu_id}")
        if self.device.type != "cpu":
            self.to('cpu', silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)
        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)
        # We'll offload the last model manually.
        self.final_offload_hook = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
if not hasattr(self.unet , '_hf_hook' ):
return self.device
for module in self.unet.modules():
if (
                hasattr(module, '_hf_hook')
and hasattr(module._hf_hook , 'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
def __call__( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = 5_1_2 , _UpperCamelCase = 5_1_2 , _UpperCamelCase = 1_0_0 , _UpperCamelCase = 4.0 , _UpperCamelCase = 0.3 , _UpperCamelCase = 1 , _UpperCamelCase = None , _UpperCamelCase = "pil" , _UpperCamelCase = True , ) -> str:
UpperCAmelCase_ : Any = self._execution_device
UpperCAmelCase_ : Union[str, Any] = guidance_scale > 1.0
if isinstance(_UpperCamelCase , _UpperCamelCase ):
UpperCAmelCase_ : str = torch.cat(_UpperCamelCase , dim=0 )
UpperCAmelCase_ : Optional[Any] = image_embeds.shape[0]
if isinstance(_UpperCamelCase , _UpperCamelCase ):
UpperCAmelCase_ : Union[str, Any] = torch.cat(_UpperCamelCase , dim=0 )
if do_classifier_free_guidance:
UpperCAmelCase_ : int = image_embeds.repeat_interleave(_UpperCamelCase , dim=0 )
UpperCAmelCase_ : int = negative_image_embeds.repeat_interleave(_UpperCamelCase , dim=0 )
UpperCAmelCase_ : Optional[Any] = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=_UpperCamelCase )
if not isinstance(_UpperCamelCase , _UpperCamelCase ):
UpperCAmelCase_ : Tuple = [image]
if not all(isinstance(_UpperCamelCase , (PIL.Image.Image, torch.Tensor) ) for i in image ):
raise ValueError(
f"Input is in incorrect format: {[type(_UpperCamelCase ) for i in image]}. Currently, we only support PIL image and pytorch tensor" )
UpperCAmelCase_ : str = torch.cat([prepare_image(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) for i in image] , dim=0 )
UpperCAmelCase_ : Any = image.to(dtype=image_embeds.dtype , device=_UpperCamelCase )
UpperCAmelCase_ : List[str] = self.movq.encode(_UpperCamelCase )['latents']
UpperCAmelCase_ : List[Any] = latents.repeat_interleave(_UpperCamelCase , dim=0 )
self.scheduler.set_timesteps(_UpperCamelCase , device=_UpperCamelCase )
UpperCAmelCase_ , UpperCAmelCase_ : Any = self.get_timesteps(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
UpperCAmelCase_ : Optional[Any] = timesteps[:1].repeat(batch_size * num_images_per_prompt )
UpperCAmelCase_ , UpperCAmelCase_ : str = downscale_height_and_width(_UpperCamelCase , _UpperCamelCase , self.movq_scale_factor )
UpperCAmelCase_ : Dict = self.prepare_latents(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , image_embeds.dtype , _UpperCamelCase , _UpperCamelCase )
for i, t in enumerate(self.progress_bar(_UpperCamelCase ) ):
# expand the latents if we are doing classifier free guidance
UpperCAmelCase_ : Optional[Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
UpperCAmelCase_ : str = {'image_embeds': image_embeds}
UpperCAmelCase_ : Union[str, Any] = self.unet(
sample=_UpperCamelCase , timestep=_UpperCamelCase , encoder_hidden_states=_UpperCamelCase , added_cond_kwargs=_UpperCamelCase , return_dict=_UpperCamelCase , )[0]
if do_classifier_free_guidance:
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = noise_pred.split(latents.shape[1] , dim=1 )
UpperCAmelCase_ , UpperCAmelCase_ : str = noise_pred.chunk(2 )
UpperCAmelCase_ , UpperCAmelCase_ : str = variance_pred.chunk(2 )
UpperCAmelCase_ : Dict = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
UpperCAmelCase_ : Tuple = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , 'variance_type' )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
UpperCAmelCase_ , UpperCAmelCase_ : int = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
UpperCAmelCase_ : List[str] = self.scheduler.step(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , generator=_UpperCamelCase , )[0]
# post-processing
UpperCAmelCase_ : Optional[Any] = self.movq.decode(_UpperCamelCase , force_not_quantize=_UpperCamelCase )['sample']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}" )
if output_type in ["np", "pil"]:
UpperCAmelCase_ : List[str] = image * 0.5 + 0.5
UpperCAmelCase_ : List[Any] = image.clamp(0 , 1 )
UpperCAmelCase_ : Dict = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
UpperCAmelCase_ : List[Any] = self.numpy_to_pil(_UpperCamelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_UpperCamelCase )
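# Hedged numeric sketch of the strength logic in get_timesteps above: with 100 steps
# and strength=0.2, only the final 20 scheduler timesteps are applied to the noised image.
def _strength_demo():
    num_inference_steps, strength = 100, 0.2
    init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
    t_start = max(num_inference_steps - init_timestep, 0)
    assert (t_start, num_inference_steps - t_start) == (80, 20)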
| 29 | 1 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTImageProcessingTester(unittest.TestCase):
    '''simple docstring'''
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_center_crop=True, crop_size=None, do_flip_channel_order=True, ):
        size = size if size is not None else {'shortest_edge': 20}
        crop_size = crop_size if crop_size is not None else {'height': 18, 'width': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_flip_channel_order": self.do_flip_channel_order,
}
@require_torch
@require_vision
class MobileViTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    '''simple docstring'''
    image_processing_class = MobileViTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = MobileViTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, 'do_resize'))
        self.assertTrue(hasattr(image_processing, 'size'))
        self.assertTrue(hasattr(image_processing, 'do_center_crop'))
        self.assertTrue(hasattr(image_processing, 'center_crop'))
        self.assertTrue(hasattr(image_processing, 'do_flip_channel_order'))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {'shortest_edge': 20})
        self.assertEqual(image_processor.crop_size, {'height': 18, 'width': 18})
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {'shortest_edge': 42})
        self.assertEqual(image_processor.crop_size, {'height': 84, 'width': 84})
def __UpperCAmelCase ( self ) -> List[str]:
pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ), )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ), )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ), )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ), )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ), )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ), )
| 29 |
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def parse_flag_from_env(key, default=False):
    '''simple docstring'''
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"If set, {key} must be yes or no.")
    return _value
_run_slow_tests = parse_flag_from_env('RUN_SLOW', default=False)
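# Hedged usage sketch for parse_flag_from_env (the env var names are made up;
# note strtobool returns 1/0 rather than True/False):
def _parse_flag_demo():
    os.environ['MY_FAKE_FLAG'] = 'yes'
    assert parse_flag_from_env('MY_FAKE_FLAG') == 1
    assert parse_flag_from_env('UNSET_FAKE_FLAG', default=False) is False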
def lowercase__ ( __snake_case : int ):
'''simple docstring'''
return unittest.skip('Test was skipped' )(__snake_case )
def lowercase__ ( __snake_case : Tuple ):
'''simple docstring'''
return unittest.skipUnless(_run_slow_tests , 'test is slow' )(__snake_case )
def lowercase__ ( __snake_case : List[str] ):
'''simple docstring'''
return unittest.skipUnless(not torch.cuda.is_available() , 'test requires only a CPU' )(__snake_case )
def lowercase__ ( __snake_case : Tuple ):
'''simple docstring'''
return unittest.skipUnless(torch.cuda.is_available() , 'test requires a GPU' )(__snake_case )
def lowercase__ ( __snake_case : List[str] ):
'''simple docstring'''
return unittest.skipUnless(is_xpu_available() , 'test requires a XPU' )(__snake_case )
def lowercase__ ( __snake_case : str ):
'''simple docstring'''
return unittest.skipUnless(is_mps_available() , 'test requires a `mps` backend support in `torch`' )(__snake_case )
def lowercase__ ( __snake_case : Tuple ):
'''simple docstring'''
return unittest.skipUnless(
is_transformers_available() and is_datasets_available() , 'test requires the Hugging Face suite' )(__snake_case )
def lowercase__ ( __snake_case : str ):
'''simple docstring'''
return unittest.skipUnless(is_bnb_available() , 'test requires the bitsandbytes library' )(__snake_case )
def lowercase__ ( __snake_case : Dict ):
'''simple docstring'''
return unittest.skipUnless(is_tpu_available() , 'test requires TPU' )(__snake_case )
def lowercase__ ( __snake_case : Tuple ):
'''simple docstring'''
return unittest.skipUnless(torch.cuda.device_count() == 1 , 'test requires a GPU' )(__snake_case )
def lowercase__ ( __snake_case : Dict ):
'''simple docstring'''
return unittest.skipUnless(torch.xpu.device_count() == 1 , 'test requires a XPU' )(__snake_case )
def lowercase__ ( __snake_case : Optional[int] ):
'''simple docstring'''
return unittest.skipUnless(torch.cuda.device_count() > 1 , 'test requires multiple GPUs' )(__snake_case )
def lowercase__ ( __snake_case : int ):
'''simple docstring'''
return unittest.skipUnless(torch.xpu.device_count() > 1 , 'test requires multiple XPUs' )(__snake_case )
def lowercase__ ( __snake_case : Dict ):
'''simple docstring'''
return unittest.skipUnless(is_safetensors_available() , 'test requires safetensors' )(__snake_case )
def lowercase__ ( __snake_case : Tuple ):
'''simple docstring'''
return unittest.skipUnless(is_deepspeed_available() , 'test requires DeepSpeed' )(__snake_case )
def lowercase__ ( __snake_case : List[Any] ):
'''simple docstring'''
return unittest.skipUnless(is_torch_version('>=' , '1.12.0' ) , 'test requires torch version >= 1.12.0' )(__snake_case )
def lowercase__ ( __snake_case : Dict=None , __snake_case : Dict=None ):
'''simple docstring'''
if test_case is None:
return partial(__snake_case , version=__snake_case )
return unittest.skipUnless(is_torch_version('>=' , __snake_case ) , F"test requires torch version >= {version}" )(__snake_case )
def lowercase__ ( __snake_case : str ):
'''simple docstring'''
return unittest.skipUnless(is_tensorboard_available() , 'test requires Tensorboard' )(__snake_case )
def lowercase__ ( __snake_case : List[str] ):
'''simple docstring'''
return unittest.skipUnless(is_wandb_available() , 'test requires wandb' )(__snake_case )
def lowercase__ ( __snake_case : str ):
'''simple docstring'''
return unittest.skipUnless(is_comet_ml_available() , 'test requires comet_ml' )(__snake_case )
_atleast_one_tracker_available = (
    any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)
def lowercase__ ( __snake_case : List[Any] ):
'''simple docstring'''
return unittest.skipUnless(
_atleast_one_tracker_available , 'test requires at least one tracker to be available and for `comet_ml` to not be installed' , )(__snake_case )
class lowerCamelCase (unittest.TestCase ):
'''simple docstring'''
_snake_case : Union[str, Any] = True
@classmethod
def __UpperCAmelCase ( cls ) -> Union[str, Any]:
UpperCAmelCase_ : List[Any] = tempfile.mkdtemp()
@classmethod
def __UpperCAmelCase ( cls ) -> List[str]:
if os.path.exists(cls.tmpdir ):
shutil.rmtree(cls.tmpdir )
def __UpperCAmelCase ( self ) -> str:
if self.clear_on_setup:
for path in Path(self.tmpdir ).glob('**/*' ):
if path.is_file():
path.unlink()
elif path.is_dir():
shutil.rmtree(_UpperCamelCase )
class lowerCamelCase (unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self ) -> Optional[int]:
super().tearDown()
# Reset the state of the AcceleratorState singleton.
AcceleratorState._reset_state()
PartialState._reset_state()
class lowerCamelCase (unittest.TestCase ):
'''simple docstring'''
    def add_mocks(self, mocks):
        self.mocks = mocks if isinstance(mocks, (tuple, list)) else [mocks]
        for m in self.mocks:
            m.start()
            self.addCleanup(m.stop)
def are_the_same_tensors(tensor):
    '''simple docstring'''
    state = AcceleratorState()
    tensor = tensor[None].clone().to(state.device)
    tensors = gather(tensor).cpu()
    tensor = tensor[0].cpu()
    for i in range(tensors.shape[0]):
        if not torch.equal(tensors[i], tensor):
            return False
    return True
class _RunOutput:
    '''simple docstring'''
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def _read_stream(stream, callback):
    '''simple docstring'''
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    '''simple docstring'''
    if echo:
        print('\nRunning: ', ' '.join(cmd))
    p = await asyncio.create_subprocess_exec(
        cmd[0], *cmd[1:], stdin=stdin, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE, env=env, )
    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)
    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode('utf-8').rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label='stdout:'))),
            asyncio.create_task(_read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label='stderr:'))),
        ], timeout=timeout, )
    return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    '''simple docstring'''
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo))
    cmd_str = ' '.join(cmd)
    if result.returncode > 0:
        stderr = '\n'.join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}")
    return result
class SubprocessCallException(Exception):
    '''simple docstring'''
    pass
def run_command(command, return_stdout=False):
    '''simple docstring'''
    try:
        output = subprocess.check_output(command, stderr=subprocess.STDOUT)
        if return_stdout:
            if hasattr(output, 'decode'):
                output = output.decode('utf-8')
            return output
    except subprocess.CalledProcessError as e:
        raise SubprocessCallException(
            f"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}") from e
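# Hedged usage sketch for run_command (POSIX-only example command):
def _run_command_demo():
    out = run_command(['echo', 'hello'], return_stdout=True)
    assert out.strip() == 'hello'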
| 29 | 1 |
from abc import ABC, abstractmethod
from typing import Optional, Union
from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike
class AbstractDatasetReader(ABC):
    '''simple docstring'''
    def __init__(self, path_or_paths=None, split=None, features=None, cache_dir=None, keep_in_memory=False, streaming=False, num_proc=None, **kwargs) -> None:
        self.path_or_paths = path_or_paths
        self.split = split if split or isinstance(path_or_paths, dict) else 'train'
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, DatasetDict, IterableDataset, IterableDatasetDict]:
        pass


class AbstractDatasetInputStream(ABC):
    '''simple docstring'''
    def __init__(self, features=None, cache_dir=None, keep_in_memory=False, streaming=False, num_proc=None, **kwargs) -> None:
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, IterableDataset]:
        pass
| 29 |
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
__UpperCAmelCase = logging.getLogger(__name__)
def dummy_dataloaders(a=2, b=3, batch_size=16, n_train_batches: int = 10, n_valid_batches: int = 2):
    '''simple docstring'''
    def get_dataset(n_batches):
        x = torch.randn(batch_size * n_batches, 1)
        return TensorDataset(x, a * x + b + 0.1 * torch.randn(batch_size * n_batches, 1))

    train_dataset = get_dataset(n_train_batches)
    valid_dataset = get_dataset(n_valid_batches)
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
    valid_dataloader = DataLoader(valid_dataset, shuffle=False, batch_size=batch_size, num_workers=4)
    return (train_dataloader, valid_dataloader)
def train(num_epochs, model, dataloader, optimizer, accelerator, scheduler=None):
    '''simple docstring'''
    rands = []
    for epoch in range(num_epochs):
        # Train quickly
        model.train()
        for batch in dataloader:
            x, y = batch
            outputs = model(x)
            loss = torch.nn.functional.mse_loss(outputs, y)
            accelerator.backward(loss)
            optimizer.step()
            optimizer.zero_grad()
        rands.append(random.random())  # Introduce some randomness
        if scheduler is not None:
            scheduler.step()
    return rands
class DummyModel(nn.Module):
    '''simple docstring'''
    def __init__(self):
        super().__init__()
        self.a = nn.Parameter(torch.randn(1))
        self.b = nn.Parameter(torch.randn(1))

    def forward(self, x):
        return x * self.a + self.b
class lowerCamelCase (unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self ) -> Dict:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
UpperCAmelCase_ : Tuple = DummyModel()
UpperCAmelCase_ : List[str] = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = dummy_dataloaders()
UpperCAmelCase_ : Optional[int] = ProjectConfiguration(total_limit=1 , project_dir=_UpperCamelCase , automatic_checkpoint_naming=_UpperCamelCase )
# Train baseline
UpperCAmelCase_ : Dict = Accelerator(project_config=_UpperCamelCase )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : str = accelerator.prepare(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# Save initial
accelerator.save_state()
# Save second state
accelerator.save_state()
self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 )
def __UpperCAmelCase ( self ) -> int:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
UpperCAmelCase_ : Optional[Any] = DummyModel()
UpperCAmelCase_ : str = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = dummy_dataloaders()
# Train baseline
UpperCAmelCase_ : Tuple = Accelerator()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = accelerator.prepare(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# Save initial
UpperCAmelCase_ : Any = os.path.join(_UpperCamelCase , 'initial' )
accelerator.save_state(_UpperCamelCase )
((UpperCAmelCase_) , (UpperCAmelCase_)) : Optional[int] = model.a.item(), model.b.item()
UpperCAmelCase_ : Dict = optimizer.state_dict()
UpperCAmelCase_ : Union[str, Any] = train(3 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
((UpperCAmelCase_) , (UpperCAmelCase_)) : Union[str, Any] = model.a.item(), model.b.item()
UpperCAmelCase_ : Any = optimizer.state_dict()
# Train partially
set_seed(4_2 )
UpperCAmelCase_ : int = DummyModel()
UpperCAmelCase_ : int = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
UpperCAmelCase_ , UpperCAmelCase_ : str = dummy_dataloaders()
UpperCAmelCase_ : Optional[Any] = Accelerator()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Tuple = accelerator.prepare(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
accelerator.load_state(_UpperCamelCase )
((UpperCAmelCase_) , (UpperCAmelCase_)) : List[str] = model.a.item(), model.b.item()
UpperCAmelCase_ : Optional[Any] = optimizer.state_dict()
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
UpperCAmelCase_ : Dict = train(2 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# Save everything
UpperCAmelCase_ : Union[str, Any] = os.path.join(_UpperCamelCase , 'checkpoint' )
accelerator.save_state(_UpperCamelCase )
# Load everything back in and make sure all states work
accelerator.load_state(_UpperCamelCase )
test_rands += train(1 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
((UpperCAmelCase_) , (UpperCAmelCase_)) : Optional[Any] = model.a.item(), model.b.item()
UpperCAmelCase_ : Union[str, Any] = optimizer.state_dict()
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
def __UpperCAmelCase ( self ) -> int:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
UpperCAmelCase_ : Tuple = DummyModel()
UpperCAmelCase_ : Optional[int] = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = dummy_dataloaders()
UpperCAmelCase_ : Any = ProjectConfiguration(automatic_checkpoint_naming=_UpperCamelCase )
# Train baseline
UpperCAmelCase_ : str = Accelerator(project_dir=_UpperCamelCase , project_config=_UpperCamelCase )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Any = accelerator.prepare(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# Save initial
accelerator.save_state()
((UpperCAmelCase_) , (UpperCAmelCase_)) : Optional[int] = model.a.item(), model.b.item()
UpperCAmelCase_ : Optional[int] = optimizer.state_dict()
UpperCAmelCase_ : Optional[Any] = train(3 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
((UpperCAmelCase_) , (UpperCAmelCase_)) : Tuple = model.a.item(), model.b.item()
UpperCAmelCase_ : Optional[int] = optimizer.state_dict()
# Train partially
set_seed(4_2 )
UpperCAmelCase_ : Any = DummyModel()
UpperCAmelCase_ : Any = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = dummy_dataloaders()
UpperCAmelCase_ : Tuple = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=_UpperCamelCase )
UpperCAmelCase_ : List[Any] = Accelerator(project_dir=_UpperCamelCase , project_config=_UpperCamelCase )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Any = accelerator.prepare(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
accelerator.load_state(os.path.join(_UpperCamelCase , 'checkpoints' , 'checkpoint_0' ) )
((UpperCAmelCase_) , (UpperCAmelCase_)) : str = model.a.item(), model.b.item()
UpperCAmelCase_ : List[Any] = optimizer.state_dict()
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
UpperCAmelCase_ : Union[str, Any] = train(2 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# Save everything
accelerator.save_state()
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(_UpperCamelCase , 'checkpoints' , 'checkpoint_1' ) )
test_rands += train(1 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
((UpperCAmelCase_) , (UpperCAmelCase_)) : List[Any] = model.a.item(), model.b.item()
UpperCAmelCase_ : Dict = optimizer.state_dict()
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
def __UpperCAmelCase ( self ) -> Dict:
UpperCAmelCase_ : Optional[Any] = torch.tensor([1, 2, 3] )
UpperCAmelCase_ : Any = torch.tensor([2, 3, 4] )
UpperCAmelCase_ : Union[str, Any] = DummyModel()
UpperCAmelCase_ : List[str] = torch.optim.Adam(net.parameters() )
UpperCAmelCase_ : Any = Accelerator()
with self.assertRaises(_UpperCamelCase ) as ve:
accelerator.register_for_checkpointing(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
UpperCAmelCase_ : Optional[int] = str(ve.exception )
self.assertTrue('Item at index 0' in message )
self.assertTrue('Item at index 1' in message )
self.assertFalse('Item at index 2' in message )
self.assertFalse('Item at index 3' in message )
def __UpperCAmelCase ( self ) -> int:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
UpperCAmelCase_ : int = DummyModel()
UpperCAmelCase_ : Any = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
UpperCAmelCase_ : Dict = torch.optim.lr_scheduler.StepLR(_UpperCamelCase , step_size=1 , gamma=0.99 )
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = dummy_dataloaders()
UpperCAmelCase_ : Tuple = ProjectConfiguration(automatic_checkpoint_naming=_UpperCamelCase )
# Train baseline
UpperCAmelCase_ : Tuple = Accelerator(project_dir=_UpperCamelCase , project_config=_UpperCamelCase )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Any = accelerator.prepare(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# Save initial
accelerator.save_state()
UpperCAmelCase_ : Dict = scheduler.state_dict()
train(3 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
self.assertNotEqual(_UpperCamelCase , scheduler.state_dict() )
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(_UpperCamelCase , 'checkpoints' , 'checkpoint_0' ) )
self.assertEqual(_UpperCamelCase , scheduler.state_dict() )
def __UpperCAmelCase ( self ) -> Dict:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
UpperCAmelCase_ : Optional[int] = DummyModel()
UpperCAmelCase_ : Dict = ProjectConfiguration(automatic_checkpoint_naming=_UpperCamelCase , total_limit=2 )
# Train baseline
UpperCAmelCase_ : Optional[int] = Accelerator(project_dir=_UpperCamelCase , project_config=_UpperCamelCase )
UpperCAmelCase_ : str = accelerator.prepare(_UpperCamelCase )
# Save 3 states:
for _ in range(1_1 ):
accelerator.save_state()
self.assertTrue(not os.path.exists(os.path.join(_UpperCamelCase , 'checkpoints' , 'checkpoint_0' ) ) )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase , 'checkpoints' , 'checkpoint_9' ) ) )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase , 'checkpoints' , 'checkpoint_10' ) ) )
@require_cuda
def __UpperCAmelCase ( self ) -> str:
UpperCAmelCase_ : List[str] = ['torchrun', f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__ )]
execute_subprocess_async(_UpperCamelCase , env=os.environ.copy() )
if __name__ == "__main__":
    savedir = '/tmp/accelerate/state_checkpointing'
    model = DummyModel()
    optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
    train_dataloader, valid_dataloader = dummy_dataloaders()
    project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
    # Train baseline
    accelerator = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision='no')
    if accelerator.process_index == 0:
        if os.path.exists(savedir):
            shutil.rmtree(savedir)
        os.makedirs(savedir)
    model, optimizer, train_dataloader, valid_dataloader, scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, valid_dataloader, scheduler
    )
    model, optimizer = accelerator.prepare(model, optimizer)
    train(3, model, train_dataloader, optimizer, accelerator, scheduler)
    # Check that the initial optimizer is loaded on the GPU
    for group in optimizer.param_groups:
        param_device = group['params'][0].device
        break
    assert param_device.type == accelerator.device.type
    model = model.cpu()
accelerator.wait_for_everyone()
accelerator.save_state()
accelerator.wait_for_everyone()
# Check CPU state
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='cpu')
for group in optimizer.param_groups:
        param_device = group['params'][0].device
break
assert (
param_device.type == torch.device('cpu').type
), F"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
# Check device state
model.to(accelerator.device)
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='on_device')
for group in optimizer.param_groups:
        param_device = group['params'][0].device
break
assert (
param_device.type == accelerator.device.type
), F"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
# Check error
with pytest.raises(TypeError, match='Unsupported optimizer map location passed'):
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='invalid')
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
shutil.rmtree(savedir)
accelerator.wait_for_everyone()
| 29 | 1 |
import functools
def lowercase__ ( __snake_case : str , __snake_case : str ):
'''Compute the Levenshtein (edit) distance between two words via memoized recursion.'''
UpperCAmelCase_ : List[str] = len(__snake_case )
UpperCAmelCase_ : Dict = len(__snake_case )
@functools.cache
def min_distance(__snake_case : int , __snake_case : int ) -> int:
# first word exhausted - delete all remaining characters of the second word
if indexa >= len_worda:
return len_worda - indexa
# second word exhausted - delete all remaining characters of the first word
if indexa >= len_worda:
return len_worda - indexa
UpperCAmelCase_ : List[str] = int(worda[indexa] != worda[indexa] ) # current letters not identical
return min(
1 + min_distance(indexa + 1 , __snake_case ) , 1 + min_distance(__snake_case , indexa + 1 ) , diff + min_distance(indexa + 1 , indexa + 1 ) , )
return min_distance(0 , 0 )
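# illustrative check (values chosen for this sketch, not part of the original):
# the classic pair "kitten" -> "sitting" has edit distance 3 (two substitutions
# plus one insertion); functools.cache memoizes every index pair, so the
# recursion runs in O(len(worda) * len(wordb)) time and space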
if __name__ == "__main__":
import doctest
doctest.testmod()
| 29 |
import warnings
from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor
__UpperCAmelCase = logging.get_logger(__name__)
class lowerCamelCase (_snake_case ):
'''simple docstring'''
def __init__( self , *_UpperCamelCase , **_UpperCamelCase ) -> None:
warnings.warn(
'The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use ImageGPTImageProcessor instead.' , _UpperCamelCase , )
super().__init__(*_UpperCamelCase , **_UpperCamelCase )
| 29 | 1 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionSAGPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class lowerCamelCase (_snake_case , _snake_case , unittest.TestCase ):
'''simple docstring'''
_snake_case : Dict = StableDiffusionSAGPipeline
_snake_case : Optional[int] = TEXT_TO_IMAGE_PARAMS
_snake_case : str = TEXT_TO_IMAGE_BATCH_PARAMS
_snake_case : Optional[Any] = TEXT_TO_IMAGE_IMAGE_PARAMS
_snake_case : Optional[Any] = TEXT_TO_IMAGE_IMAGE_PARAMS
_snake_case : str = False
def __UpperCAmelCase ( self ) -> Optional[int]:
torch.manual_seed(0 )
UpperCAmelCase_ : List[Any] = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=3_2 , )
UpperCAmelCase_ : Any = DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='scaled_linear' , clip_sample=_UpperCamelCase , set_alpha_to_one=_UpperCamelCase , )
torch.manual_seed(0 )
UpperCAmelCase_ : List[str] = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
torch.manual_seed(0 )
UpperCAmelCase_ : Optional[int] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
UpperCAmelCase_ : int = CLIPTextModel(_UpperCamelCase )
UpperCAmelCase_ : Dict = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
UpperCAmelCase_ : List[str] = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase=0 ) -> Dict:
if str(_UpperCamelCase ).startswith('mps' ):
UpperCAmelCase_ : Optional[int] = torch.manual_seed(_UpperCamelCase )
else:
UpperCAmelCase_ : List[Any] = torch.Generator(device=_UpperCamelCase ).manual_seed(_UpperCamelCase )
UpperCAmelCase_ : Tuple = {
'prompt': '.',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 1.0,
'sag_scale': 1.0,
'output_type': 'numpy',
}
return inputs
def __UpperCAmelCase ( self ) -> Any:
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class lowerCamelCase (unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self ) -> int:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCAmelCase ( self ) -> int:
UpperCAmelCase_ : Any = StableDiffusionSAGPipeline.from_pretrained('CompVis/stable-diffusion-v1-4' )
UpperCAmelCase_ : str = sag_pipe.to(_UpperCamelCase )
sag_pipe.set_progress_bar_config(disable=_UpperCamelCase )
UpperCAmelCase_ : Dict = '.'
UpperCAmelCase_ : Optional[int] = torch.manual_seed(0 )
UpperCAmelCase_ : List[str] = sag_pipe(
[prompt] , generator=_UpperCamelCase , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=2_0 , output_type='np' )
UpperCAmelCase_ : Tuple = output.images
UpperCAmelCase_ : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
UpperCAmelCase_ : List[str] = np.array([0.15_68, 0.17_38, 0.16_95, 0.16_93, 0.15_07, 0.17_05, 0.15_47, 0.17_51, 0.19_49] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-2
def __UpperCAmelCase ( self ) -> Optional[Any]:
UpperCAmelCase_ : int = StableDiffusionSAGPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base' )
UpperCAmelCase_ : int = sag_pipe.to(_UpperCamelCase )
sag_pipe.set_progress_bar_config(disable=_UpperCamelCase )
UpperCAmelCase_ : List[Any] = '.'
UpperCAmelCase_ : Union[str, Any] = torch.manual_seed(0 )
UpperCAmelCase_ : str = sag_pipe(
[prompt] , generator=_UpperCamelCase , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=2_0 , output_type='np' )
UpperCAmelCase_ : Any = output.images
UpperCAmelCase_ : str = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
UpperCAmelCase_ : Optional[Any] = np.array([0.34_59, 0.28_76, 0.25_37, 0.30_02, 0.26_71, 0.21_60, 0.30_26, 0.22_62, 0.23_71] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-2
def __UpperCAmelCase ( self ) -> Union[str, Any]:
UpperCAmelCase_ : Tuple = StableDiffusionSAGPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base' )
UpperCAmelCase_ : Optional[int] = sag_pipe.to(_UpperCamelCase )
sag_pipe.set_progress_bar_config(disable=_UpperCamelCase )
UpperCAmelCase_ : Any = '.'
UpperCAmelCase_ : int = torch.manual_seed(0 )
UpperCAmelCase_ : str = sag_pipe(
[prompt] , width=7_6_8 , height=5_1_2 , generator=_UpperCamelCase , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=2_0 , output_type='np' , )
UpperCAmelCase_ : Dict = output.images
assert image.shape == (1, 5_1_2, 7_6_8, 3)
| 29 |
def lowercase__ ( __snake_case : Dict ):
'''Check whether a linked list is a palindrome by reversing its second half in place.'''
if not head:
return True
# split the list into two parts
UpperCAmelCase_ , UpperCAmelCase_ : Any = head.next, head
while fast and fast.next:
UpperCAmelCase_ : str = fast.next.next
UpperCAmelCase_ : Union[str, Any] = slow.next
UpperCAmelCase_ : int = slow.next
UpperCAmelCase_ : List[Any] = None # detach the first half; the check works without this, but it keeps the halves separate
# reverse the second part
UpperCAmelCase_ : Tuple = None
while second:
UpperCAmelCase_ : int = second.next
UpperCAmelCase_ : Any = node
UpperCAmelCase_ : Optional[Any] = second
UpperCAmelCase_ : Tuple = nxt
# compare two parts
# the second part has the same number of nodes or one fewer
while node:
if node.val != head.val:
return False
UpperCAmelCase_ : Optional[Any] = node.next
UpperCAmelCase_ : Dict = head.next
return True
def lowercase__ ( __snake_case : Union[str, Any] ):
'''Check whether a linked list is a palindrome using a stack of second-half values.'''
if not head or not head.next:
return True
# 1. Get the midpoint (slow)
UpperCAmelCase_ , UpperCAmelCase_ : Any = head, head # fast and slow both start at the head
while fast and fast.next:
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = fast.next.next, slow.next
# 2. Push the second half into the stack
UpperCAmelCase_ : List[str] = [slow.val]
while slow.next:
UpperCAmelCase_ : List[str] = slow.next
stack.append(slow.val )
# 3. Comparison
while stack:
if stack.pop() != cur.val:
return False
UpperCAmelCase_ : int = cur.next
return True
def lowercase__ ( __snake_case : Dict ):
'''Check whether a linked list is a palindrome by pairing the positions of each value.'''
if not head or not head.next:
return True
UpperCAmelCase_ : Tuple = {}
UpperCAmelCase_ : int = 0
while head:
if head.val in d:
d[head.val].append(__snake_case )
else:
UpperCAmelCase_ : List[Any] = [pos]
UpperCAmelCase_ : Any = head.next
pos += 1
UpperCAmelCase_ : Dict = pos - 1
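# in a palindrome the k-th occurrence of a value pairs with its k-th-from-last
# occurrence, so every even-length position list must satisfy
# v[i] + v[len(v) - 1 - i] == pos - 1, and at most one value (the middle
# element) may appear an odd number of times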
UpperCAmelCase_ : Optional[int] = 0
for v in d.values():
if len(__snake_case ) % 2 != 0:
middle += 1
else:
UpperCAmelCase_ : int = 0
for i in range(0 , len(__snake_case ) ):
if v[i] + v[len(__snake_case ) - 1 - step] != checksum:
return False
step += 1
if middle > 1:
return False
return True
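# minimal sketch for exercising the three palindrome checks above (assumption:
# nodes only need `.val` and `.next`, which is all these functions touch; the
# names below are written out plainly rather than in this file's masked style):
class _Node:
    def __init__(self, val, nxt=None):
        self.val = val
        self.next = nxt
# 1 -> 2 -> 2 -> 1 reads the same in both directions, so each of the three
# functions above should return True for:
# _Node(1, _Node(2, _Node(2, _Node(1))))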
| 29 | 1 |
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
UNetaDConditionModel,
VideoToVideoSDPipeline,
)
from diffusers.utils import floats_tensor, is_xformers_available, skip_mps
from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class lowerCamelCase (_snake_case , unittest.TestCase ):
'''simple docstring'''
_snake_case : Tuple = VideoToVideoSDPipeline
_snake_case : List[str] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({'''video'''} ) - {'''image''', '''width''', '''height'''}
_snake_case : Optional[Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'''video'''} ) - {'''image'''}
_snake_case : str = PipelineTesterMixin.required_optional_params - {'''latents'''}
_snake_case : Any = False
# No `output_type`.
_snake_case : List[str] = frozenset(
[
'''num_inference_steps''',
'''generator''',
'''latents''',
'''return_dict''',
'''callback''',
'''callback_steps''',
] )
def __UpperCAmelCase ( self ) -> Dict:
torch.manual_seed(0 )
UpperCAmelCase_ : str = UNetaDConditionModel(
block_out_channels=(3_2, 6_4, 6_4, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'DownBlock3D') , up_block_types=('UpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D') , cross_attention_dim=3_2 , attention_head_dim=4 , )
UpperCAmelCase_ : List[Any] = DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='scaled_linear' , clip_sample=_UpperCamelCase , set_alpha_to_one=_UpperCamelCase , )
torch.manual_seed(0 )
UpperCAmelCase_ : Tuple = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=1_2_8 , )
torch.manual_seed(0 )
UpperCAmelCase_ : Any = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act='gelu' , projection_dim=5_1_2 , )
UpperCAmelCase_ : Optional[Any] = CLIPTextModel(_UpperCamelCase )
UpperCAmelCase_ : Dict = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
UpperCAmelCase_ : List[Any] = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
}
return components
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase=0 ) -> Optional[int]:
# 3 frames
UpperCAmelCase_ : Any = floats_tensor((1, 3, 3, 3_2, 3_2) , rng=random.Random(_UpperCamelCase ) ).to(_UpperCamelCase )
if str(_UpperCamelCase ).startswith('mps' ):
UpperCAmelCase_ : Optional[Any] = torch.manual_seed(_UpperCamelCase )
else:
UpperCAmelCase_ : Union[str, Any] = torch.Generator(device=_UpperCamelCase ).manual_seed(_UpperCamelCase )
UpperCAmelCase_ : Tuple = {
'prompt': 'A painting of a squirrel eating a burger',
'video': video,
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'pt',
}
return inputs
def __UpperCAmelCase ( self ) -> List[str]:
UpperCAmelCase_ : int = 'cpu' # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ : str = self.get_dummy_components()
UpperCAmelCase_ : Optional[Any] = VideoToVideoSDPipeline(**_UpperCamelCase )
UpperCAmelCase_ : List[Any] = sd_pipe.to(_UpperCamelCase )
sd_pipe.set_progress_bar_config(disable=_UpperCamelCase )
UpperCAmelCase_ : List[str] = self.get_dummy_inputs(_UpperCamelCase )
UpperCAmelCase_ : Tuple = 'np'
UpperCAmelCase_ : List[Any] = sd_pipe(**_UpperCamelCase ).frames
UpperCAmelCase_ : Union[str, Any] = frames[0][-3:, -3:, -1]
assert frames[0].shape == (3_2, 3_2, 3)
UpperCAmelCase_ : int = np.array([1_0_6, 1_1_7, 1_1_3, 1_7_4, 1_3_7, 1_1_2, 1_4_8, 1_5_1, 1_3_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def __UpperCAmelCase ( self ) -> int:
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=_UpperCamelCase , expected_max_diff=5E-3 )
@unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' )
def __UpperCAmelCase ( self ) -> Any:
pass
@unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' )
def __UpperCAmelCase ( self ) -> Tuple:
pass
@unittest.skip(reason='`num_images_per_prompt` argument is not supported for this pipeline.' )
def __UpperCAmelCase ( self ) -> Tuple:
pass
def __UpperCAmelCase ( self ) -> int:
return super().test_progress_bar()
@slow
@skip_mps
class lowerCamelCase (unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self ) -> List[str]:
UpperCAmelCase_ : Tuple = VideoToVideoSDPipeline.from_pretrained('cerspense/zeroscope_v2_XL' , torch_dtype=torch.floataa )
pipe.enable_model_cpu_offload()
# 10 frames
UpperCAmelCase_ : Dict = torch.Generator(device='cpu' ).manual_seed(0 )
UpperCAmelCase_ : Any = torch.randn((1, 1_0, 3, 1_0_2_4, 5_7_6) , generator=_UpperCamelCase )
UpperCAmelCase_ : Optional[int] = video.to('cuda' )
UpperCAmelCase_ : List[Any] = 'Spiderman is surfing'
UpperCAmelCase_ : List[str] = pipe(_UpperCamelCase , video=_UpperCamelCase , generator=_UpperCamelCase , num_inference_steps=3 , output_type='pt' ).frames
UpperCAmelCase_ : List[Any] = np.array([-1.0_45_89_84, -1.1_27_92_97, -0.9_66_30_86, -0.91_50_39_06, -0.75_09_76_56] )
assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array ).sum() < 1E-2
| 29 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__UpperCAmelCase = {'configuration_vit_msn': ['VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTMSNConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST',
'ViTMSNModel',
'ViTMSNForImageClassification',
'ViTMSNPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 29 | 1 |
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
__UpperCAmelCase = logging.get_logger(__name__)
class lowerCamelCase (_snake_case ):
'''simple docstring'''
_snake_case : Optional[int] = '''AutoTokenizer'''
_snake_case : List[str] = ['''tokenizer''']
_snake_case : List[str] = {
'''semantic_prompt''': 1,
'''coarse_prompt''': 2,
'''fine_prompt''': 2,
}
def __init__( self , _UpperCamelCase , _UpperCamelCase=None ) -> int:
super().__init__(_UpperCamelCase )
UpperCAmelCase_ : List[str] = speaker_embeddings
@classmethod
def __UpperCAmelCase ( cls , _UpperCamelCase , _UpperCamelCase="speaker_embeddings_path.json" , **_UpperCamelCase ) -> Any:
if speaker_embeddings_dict_path is not None:
UpperCAmelCase_ : List[Any] = get_file_from_repo(
_UpperCamelCase , _UpperCamelCase , subfolder=kwargs.pop('subfolder' , _UpperCamelCase ) , cache_dir=kwargs.pop('cache_dir' , _UpperCamelCase ) , force_download=kwargs.pop('force_download' , _UpperCamelCase ) , proxies=kwargs.pop('proxies' , _UpperCamelCase ) , resume_download=kwargs.pop('resume_download' , _UpperCamelCase ) , local_files_only=kwargs.pop('local_files_only' , _UpperCamelCase ) , use_auth_token=kwargs.pop('use_auth_token' , _UpperCamelCase ) , revision=kwargs.pop('revision' , _UpperCamelCase ) , )
if speaker_embeddings_path is None:
logger.warning(
f"`{os.path.join(_UpperCamelCase , _UpperCamelCase )}` does not exists\n , no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json\n dictionnary if wanted, otherwise set `speaker_embeddings_dict_path=None`." )
UpperCAmelCase_ : Tuple = None
else:
with open(_UpperCamelCase ) as speaker_embeddings_json:
UpperCAmelCase_ : int = json.load(_UpperCamelCase )
else:
UpperCAmelCase_ : Optional[Any] = None
UpperCAmelCase_ : List[str] = AutoTokenizer.from_pretrained(_UpperCamelCase , **_UpperCamelCase )
return cls(tokenizer=_UpperCamelCase , speaker_embeddings=_UpperCamelCase )
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase="speaker_embeddings_path.json" , _UpperCamelCase="speaker_embeddings" , _UpperCamelCase = False , **_UpperCamelCase , ) -> Optional[int]:
if self.speaker_embeddings is not None:
os.makedirs(os.path.join(_UpperCamelCase , _UpperCamelCase , 'v2' ) , exist_ok=_UpperCamelCase )
UpperCAmelCase_ : Dict = {}
UpperCAmelCase_ : Tuple = save_directory
for prompt_key in self.speaker_embeddings:
if prompt_key != "repo_or_path":
UpperCAmelCase_ : int = self._load_voice_preset(_UpperCamelCase )
UpperCAmelCase_ : List[Any] = {}
for key in self.speaker_embeddings[prompt_key]:
np.save(
os.path.join(
embeddings_dict['repo_or_path'] , _UpperCamelCase , f"{prompt_key}_{key}" ) , voice_preset[key] , allow_pickle=_UpperCamelCase , )
UpperCAmelCase_ : Optional[Any] = os.path.join(_UpperCamelCase , f"{prompt_key}_{key}.npy" )
UpperCAmelCase_ : str = tmp_dict
with open(os.path.join(_UpperCamelCase , _UpperCamelCase ) , 'w' ) as fp:
json.dump(_UpperCamelCase , _UpperCamelCase )
super().save_pretrained(_UpperCamelCase , _UpperCamelCase , **_UpperCamelCase )
def __UpperCAmelCase ( self , _UpperCamelCase = None , **_UpperCamelCase ) -> int:
UpperCAmelCase_ : str = self.speaker_embeddings[voice_preset]
UpperCAmelCase_ : Optional[int] = {}
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset_paths:
raise ValueError(
f"Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}]." )
UpperCAmelCase_ : Optional[Any] = get_file_from_repo(
self.speaker_embeddings.get('repo_or_path' , '/' ) , voice_preset_paths[key] , subfolder=kwargs.pop('subfolder' , _UpperCamelCase ) , cache_dir=kwargs.pop('cache_dir' , _UpperCamelCase ) , force_download=kwargs.pop('force_download' , _UpperCamelCase ) , proxies=kwargs.pop('proxies' , _UpperCamelCase ) , resume_download=kwargs.pop('resume_download' , _UpperCamelCase ) , local_files_only=kwargs.pop('local_files_only' , _UpperCamelCase ) , use_auth_token=kwargs.pop('use_auth_token' , _UpperCamelCase ) , revision=kwargs.pop('revision' , _UpperCamelCase ) , )
if path is None:
raise ValueError(
f"`{os.path.join(self.speaker_embeddings.get('repo_or_path' , '/' ) , voice_preset_paths[key] )}` does not exists\n , no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}\n embeddings." )
UpperCAmelCase_ : List[str] = np.load(_UpperCamelCase )
return voice_preset_dict
def __UpperCAmelCase ( self , _UpperCamelCase = None ) -> Optional[int]:
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset:
raise ValueError(f"Voice preset unrecognized, missing {key} as a key." )
if not isinstance(voice_preset[key] , np.ndarray ):
raise ValueError(f"{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray." )
if len(voice_preset[key].shape ) != self.preset_shape[key]:
raise ValueError(f"{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray." )
def __call__( self , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase="pt" , _UpperCamelCase=2_5_6 , _UpperCamelCase=False , _UpperCamelCase=True , _UpperCamelCase=False , **_UpperCamelCase , ) -> List[str]:
if voice_preset is not None and not isinstance(_UpperCamelCase , _UpperCamelCase ):
if (
isinstance(_UpperCamelCase , _UpperCamelCase )
and self.speaker_embeddings is not None
and voice_preset in self.speaker_embeddings
):
UpperCAmelCase_ : List[Any] = self._load_voice_preset(_UpperCamelCase )
else:
if isinstance(_UpperCamelCase , _UpperCamelCase ) and not voice_preset.endswith('.npz' ):
UpperCAmelCase_ : str = voice_preset + '.npz'
UpperCAmelCase_ : Optional[int] = np.load(_UpperCamelCase )
if voice_preset is not None:
self._validate_voice_preset_dict(_UpperCamelCase , **_UpperCamelCase )
UpperCAmelCase_ : Optional[int] = BatchFeature(data=_UpperCamelCase , tensor_type=_UpperCamelCase )
UpperCAmelCase_ : Union[str, Any] = self.tokenizer(
_UpperCamelCase , return_tensors=_UpperCamelCase , padding='max_length' , max_length=_UpperCamelCase , return_attention_mask=_UpperCamelCase , return_token_type_ids=_UpperCamelCase , add_special_tokens=_UpperCamelCase , **_UpperCamelCase , )
if voice_preset is not None:
UpperCAmelCase_ : int = voice_preset
return encoded_text
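# illustrative usage (hypothetical snippet; assumes the masked class above is
# Bark's processor and that the checkpoint and preset names exist on the Hub):
# processor = BarkProcessor.from_pretrained('suno/bark-small')
# inputs = processor('Hello, my dog is cute', voice_preset='v2/en_speaker_6')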
| 29 |
__UpperCAmelCase = {
'Pillow': 'Pillow<10.0.0',
'accelerate': 'accelerate>=0.20.3',
'av': 'av==9.2.0',
'beautifulsoup4': 'beautifulsoup4',
'black': 'black~=23.1',
'codecarbon': 'codecarbon==1.2.0',
'cookiecutter': 'cookiecutter==1.7.3',
'dataclasses': 'dataclasses',
'datasets': 'datasets!=2.5.0',
'decord': 'decord==0.6.0',
'deepspeed': 'deepspeed>=0.9.3',
'diffusers': 'diffusers',
'dill': 'dill<0.3.5',
'evaluate': 'evaluate>=0.2.0',
'fairscale': 'fairscale>0.3',
'faiss-cpu': 'faiss-cpu',
'fastapi': 'fastapi',
'filelock': 'filelock',
'flax': 'flax>=0.4.1,<=0.7.0',
'ftfy': 'ftfy',
'fugashi': 'fugashi>=1.0',
'GitPython': 'GitPython<3.1.19',
'hf-doc-builder': 'hf-doc-builder>=0.3.0',
'huggingface-hub': 'huggingface-hub>=0.14.1,<1.0',
'importlib_metadata': 'importlib_metadata',
'ipadic': 'ipadic>=1.0.0,<2.0',
'isort': 'isort>=5.5.4',
'jax': 'jax>=0.2.8,!=0.3.2,<=0.4.13',
'jaxlib': 'jaxlib>=0.1.65,<=0.4.13',
'jieba': 'jieba',
'kenlm': 'kenlm',
'keras-nlp': 'keras-nlp>=0.3.1',
'librosa': 'librosa',
'nltk': 'nltk',
'natten': 'natten>=0.14.6',
'numpy': 'numpy>=1.17',
'onnxconverter-common': 'onnxconverter-common',
'onnxruntime-tools': 'onnxruntime-tools>=1.4.2',
'onnxruntime': 'onnxruntime>=1.4.0',
'opencv-python': 'opencv-python',
'optuna': 'optuna',
'optax': 'optax>=0.0.8,<=0.1.4',
'packaging': 'packaging>=20.0',
'parameterized': 'parameterized',
'phonemizer': 'phonemizer',
'protobuf': 'protobuf',
'psutil': 'psutil',
'pyyaml': 'pyyaml>=5.1',
'pydantic': 'pydantic<2',
'pytest': 'pytest>=7.2.0',
'pytest-timeout': 'pytest-timeout',
'pytest-xdist': 'pytest-xdist',
'python': 'python>=3.8.0',
'ray[tune]': 'ray[tune]',
'regex': 'regex!=2019.12.17',
'requests': 'requests',
'rhoknp': 'rhoknp>=1.1.0,<1.3.1',
'rjieba': 'rjieba',
'rouge-score': 'rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1',
'ruff': 'ruff>=0.0.241,<=0.0.259',
'sacrebleu': 'sacrebleu>=1.4.12,<2.0.0',
'sacremoses': 'sacremoses',
'safetensors': 'safetensors>=0.3.1',
'sagemaker': 'sagemaker>=2.31.0',
'scikit-learn': 'scikit-learn',
'sentencepiece': 'sentencepiece>=0.1.91,!=0.1.92',
'sigopt': 'sigopt',
'starlette': 'starlette',
'sudachipy': 'sudachipy>=0.6.6',
'sudachidict_core': 'sudachidict_core>=20220729',
'tensorflow-cpu': 'tensorflow-cpu>=2.6,<2.14',
'tensorflow': 'tensorflow>=2.6,<2.14',
'tensorflow-text': 'tensorflow-text<2.14',
'tf2onnx': 'tf2onnx',
'timeout-decorator': 'timeout-decorator',
'timm': 'timm',
'tokenizers': 'tokenizers>=0.11.1,!=0.11.3,<0.14',
'torch': 'torch>=1.9,!=1.12.0',
'torchaudio': 'torchaudio',
'torchvision': 'torchvision',
'pyctcdecode': 'pyctcdecode>=0.4.0',
'tqdm': 'tqdm>=4.27',
'unidic': 'unidic>=1.0.2',
'unidic_lite': 'unidic_lite>=1.0.7',
'urllib3': 'urllib3<2.0.0',
'uvicorn': 'uvicorn',
}
| 29 | 1 |
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class lowerCamelCase (unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self ) -> Tuple:
UpperCAmelCase_ : Optional[int] = '| <pad> <unk> <s> </s> a b c d e f g h i j k'.split()
UpperCAmelCase_ : Union[str, Any] = dict(zip(_UpperCamelCase , range(len(_UpperCamelCase ) ) ) )
UpperCAmelCase_ : Optional[Any] = {
'unk_token': '<unk>',
'bos_token': '<s>',
'eos_token': '</s>',
}
UpperCAmelCase_ : str = {
'feature_size': 1,
'padding_value': 0.0,
'sampling_rate': 1_6_0_0_0,
'return_attention_mask': False,
'do_normalize': True,
}
UpperCAmelCase_ : List[str] = tempfile.mkdtemp()
UpperCAmelCase_ : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
UpperCAmelCase_ : int = os.path.join(self.tmpdirname , _UpperCamelCase )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(_UpperCamelCase ) + '\n' )
with open(self.feature_extraction_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(_UpperCamelCase ) + '\n' )
# load decoder from hub
UpperCAmelCase_ : List[Any] = 'hf-internal-testing/ngram-beam-search-decoder'
def __UpperCAmelCase ( self , **_UpperCamelCase ) -> Union[str, Any]:
UpperCAmelCase_ : Optional[Any] = self.add_kwargs_tokens_map.copy()
kwargs.update(_UpperCamelCase )
return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **_UpperCamelCase )
def __UpperCAmelCase ( self , **_UpperCamelCase ) -> List[Any]:
return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **_UpperCamelCase )
def __UpperCAmelCase ( self , **_UpperCamelCase ) -> int:
return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **_UpperCamelCase )
def __UpperCAmelCase ( self ) -> str:
shutil.rmtree(self.tmpdirname )
def __UpperCAmelCase ( self ) -> Optional[int]:
UpperCAmelCase_ : int = self.get_tokenizer()
UpperCAmelCase_ : List[str] = self.get_feature_extractor()
UpperCAmelCase_ : Optional[Any] = self.get_decoder()
UpperCAmelCase_ : List[str] = WavaVecaProcessorWithLM(tokenizer=_UpperCamelCase , feature_extractor=_UpperCamelCase , decoder=_UpperCamelCase )
processor.save_pretrained(self.tmpdirname )
UpperCAmelCase_ : Any = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , _UpperCamelCase )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , _UpperCamelCase )
# decoder
self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , )
self.assertIsInstance(processor.decoder , _UpperCamelCase )
def __UpperCAmelCase ( self ) -> Any:
UpperCAmelCase_ : Optional[Any] = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
# reload with extra decoder kwargs and check that they are forwarded to the language model
UpperCAmelCase_ : Dict = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha , 5.0 )
self.assertEqual(processor.language_model.beta , 3.0 )
self.assertEqual(processor.language_model.score_boundary , -7.0 )
self.assertEqual(processor.language_model.unk_score_offset , 3 )
def __UpperCAmelCase ( self ) -> Dict:
UpperCAmelCase_ : List[Any] = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(['xx'] )
with self.assertRaisesRegex(_UpperCamelCase , 'include' ):
WavaVecaProcessorWithLM(
tokenizer=_UpperCamelCase , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
def __UpperCAmelCase ( self ) -> int:
UpperCAmelCase_ : Union[str, Any] = self.get_feature_extractor()
UpperCAmelCase_ : Any = self.get_tokenizer()
UpperCAmelCase_ : Optional[Any] = self.get_decoder()
UpperCAmelCase_ : List[Any] = WavaVecaProcessorWithLM(tokenizer=_UpperCamelCase , feature_extractor=_UpperCamelCase , decoder=_UpperCamelCase )
UpperCAmelCase_ : Tuple = floats_list((3, 1_0_0_0) )
UpperCAmelCase_ : List[str] = feature_extractor(_UpperCamelCase , return_tensors='np' )
UpperCAmelCase_ : Any = processor(_UpperCamelCase , return_tensors='np' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def __UpperCAmelCase ( self ) -> Any:
UpperCAmelCase_ : Optional[int] = self.get_feature_extractor()
UpperCAmelCase_ : Optional[int] = self.get_tokenizer()
UpperCAmelCase_ : List[Any] = self.get_decoder()
UpperCAmelCase_ : Optional[Any] = WavaVecaProcessorWithLM(tokenizer=_UpperCamelCase , feature_extractor=_UpperCamelCase , decoder=_UpperCamelCase )
UpperCAmelCase_ : List[Any] = 'This is a test string'
UpperCAmelCase_ : str = processor(text=_UpperCamelCase )
UpperCAmelCase_ : List[str] = tokenizer(_UpperCamelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __UpperCAmelCase ( self , _UpperCamelCase=(2, 1_0, 1_6) , _UpperCamelCase=7_7 ) -> Optional[Any]:
np.random.seed(_UpperCamelCase )
return np.random.rand(*_UpperCamelCase )
def __UpperCAmelCase ( self ) -> Dict:
UpperCAmelCase_ : int = self.get_feature_extractor()
UpperCAmelCase_ : int = self.get_tokenizer()
UpperCAmelCase_ : Optional[int] = self.get_decoder()
UpperCAmelCase_ : Union[str, Any] = WavaVecaProcessorWithLM(tokenizer=_UpperCamelCase , feature_extractor=_UpperCamelCase , decoder=_UpperCamelCase )
UpperCAmelCase_ : int = self._get_dummy_logits(shape=(1_0, 1_6) , seed=1_3 )
UpperCAmelCase_ : Union[str, Any] = processor.decode(_UpperCamelCase )
UpperCAmelCase_ : List[Any] = decoder.decode_beams(_UpperCamelCase )[0]
self.assertEqual(decoded_decoder[0] , decoded_processor.text )
self.assertEqual('</s> <s> </s>' , decoded_processor.text )
self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score )
@parameterized.expand([[None], ['fork'], ['spawn']] )
def __UpperCAmelCase ( self , _UpperCamelCase ) -> str:
UpperCAmelCase_ : List[Any] = self.get_feature_extractor()
UpperCAmelCase_ : str = self.get_tokenizer()
UpperCAmelCase_ : Tuple = self.get_decoder()
UpperCAmelCase_ : Dict = WavaVecaProcessorWithLM(tokenizer=_UpperCamelCase , feature_extractor=_UpperCamelCase , decoder=_UpperCamelCase )
UpperCAmelCase_ : int = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
UpperCAmelCase_ : Union[str, Any] = processor.batch_decode(_UpperCamelCase )
else:
with get_context(_UpperCamelCase ).Pool() as pool:
UpperCAmelCase_ : Union[str, Any] = processor.batch_decode(_UpperCamelCase , _UpperCamelCase )
UpperCAmelCase_ : List[Any] = list(_UpperCamelCase )
with get_context('fork' ).Pool() as p:
UpperCAmelCase_ : Dict = decoder.decode_beams_batch(_UpperCamelCase , _UpperCamelCase )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : int = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(_UpperCamelCase , decoded_processor.text )
self.assertListEqual(['<s> <s> </s>', '<s> <s> <s>'] , decoded_processor.text )
self.assertListEqual(_UpperCamelCase , decoded_processor.logit_score )
self.assertListEqual(_UpperCamelCase , decoded_processor.lm_score )
def __UpperCAmelCase ( self ) -> int:
UpperCAmelCase_ : Optional[int] = self.get_feature_extractor()
UpperCAmelCase_ : Optional[Any] = self.get_tokenizer()
UpperCAmelCase_ : Any = self.get_decoder()
UpperCAmelCase_ : int = WavaVecaProcessorWithLM(tokenizer=_UpperCamelCase , feature_extractor=_UpperCamelCase , decoder=_UpperCamelCase )
UpperCAmelCase_ : Any = self._get_dummy_logits()
UpperCAmelCase_ : List[str] = 1_5
UpperCAmelCase_ : str = -20.0
UpperCAmelCase_ : List[Any] = -4.0
UpperCAmelCase_ : Dict = processor.batch_decode(
_UpperCamelCase , beam_width=_UpperCamelCase , beam_prune_logp=_UpperCamelCase , token_min_logp=_UpperCamelCase , )
UpperCAmelCase_ : List[str] = decoded_processor_out.text
UpperCAmelCase_ : Dict = list(_UpperCamelCase )
with get_context('fork' ).Pool() as pool:
UpperCAmelCase_ : Optional[int] = decoder.decode_beams_batch(
_UpperCamelCase , _UpperCamelCase , beam_width=_UpperCamelCase , beam_prune_logp=_UpperCamelCase , token_min_logp=_UpperCamelCase , )
UpperCAmelCase_ : Dict = [d[0][0] for d in decoded_decoder_out]
UpperCAmelCase_ : Union[str, Any] = [d[0][2] for d in decoded_decoder_out]
UpperCAmelCase_ : str = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(_UpperCamelCase , _UpperCamelCase )
self.assertListEqual(['</s> <s> <s>', '<s> <s> <s>'] , _UpperCamelCase )
self.assertTrue(np.array_equal(_UpperCamelCase , decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-20.0_54, -18.4_47] , _UpperCamelCase , atol=1E-3 ) )
self.assertTrue(np.array_equal(_UpperCamelCase , decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-15.5_54, -13.94_74] , _UpperCamelCase , atol=1E-3 ) )
def __UpperCAmelCase ( self ) -> List[Any]:
UpperCAmelCase_ : str = self.get_feature_extractor()
UpperCAmelCase_ : Tuple = self.get_tokenizer()
UpperCAmelCase_ : List[str] = self.get_decoder()
UpperCAmelCase_ : Tuple = WavaVecaProcessorWithLM(tokenizer=_UpperCamelCase , feature_extractor=_UpperCamelCase , decoder=_UpperCamelCase )
UpperCAmelCase_ : Optional[Any] = self._get_dummy_logits()
UpperCAmelCase_ : List[Any] = 2.0
UpperCAmelCase_ : List[str] = 5.0
UpperCAmelCase_ : Optional[Any] = -20.0
UpperCAmelCase_ : Union[str, Any] = True
UpperCAmelCase_ : Any = processor.batch_decode(
_UpperCamelCase , alpha=_UpperCamelCase , beta=_UpperCamelCase , unk_score_offset=_UpperCamelCase , lm_score_boundary=_UpperCamelCase , )
UpperCAmelCase_ : Dict = decoded_processor_out.text
UpperCAmelCase_ : Any = list(_UpperCamelCase )
decoder.reset_params(
alpha=_UpperCamelCase , beta=_UpperCamelCase , unk_score_offset=_UpperCamelCase , lm_score_boundary=_UpperCamelCase , )
with get_context('fork' ).Pool() as pool:
UpperCAmelCase_ : Tuple = decoder.decode_beams_batch(
_UpperCamelCase , _UpperCamelCase , )
UpperCAmelCase_ : List[Any] = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(_UpperCamelCase , _UpperCamelCase )
self.assertListEqual(['<s> </s> <s> </s> </s>', '</s> </s> <s> </s> </s>'] , _UpperCamelCase )
UpperCAmelCase_ : int = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha , 2.0 )
self.assertEqual(lm_model.beta , 5.0 )
self.assertEqual(lm_model.unk_score_offset , -20.0 )
self.assertEqual(lm_model.score_boundary , _UpperCamelCase )
def __UpperCAmelCase ( self ) -> Union[str, Any]:
UpperCAmelCase_ : Any = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm' )
UpperCAmelCase_ : Optional[Any] = processor.decoder.model_container[processor.decoder._model_key]
UpperCAmelCase_ : Union[str, Any] = Path(language_model._kenlm_model.path.decode('utf-8' ) ).parent.parent.absolute()
UpperCAmelCase_ : List[Any] = os.listdir(_UpperCamelCase )
UpperCAmelCase_ : List[str] = ['alphabet.json', 'language_model']
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(_UpperCamelCase , _UpperCamelCase )
def __UpperCAmelCase ( self ) -> List[Any]:
UpperCAmelCase_ : List[Any] = snapshot_download('hf-internal-testing/processor_with_lm' )
UpperCAmelCase_ : Union[str, Any] = WavaVecaProcessorWithLM.from_pretrained(_UpperCamelCase )
UpperCAmelCase_ : Any = processor.decoder.model_container[processor.decoder._model_key]
UpperCAmelCase_ : Dict = Path(language_model._kenlm_model.path.decode('utf-8' ) ).parent.parent.absolute()
UpperCAmelCase_ : int = os.listdir(_UpperCamelCase )
UpperCAmelCase_ : Dict = os.listdir(_UpperCamelCase )
local_decoder_files.sort()
expected_decoder_files.sort()
# test that both decoder form hub and local files in cache are the same
self.assertListEqual(_UpperCamelCase , _UpperCamelCase )
def __UpperCAmelCase ( self ) -> Dict:
UpperCAmelCase_ : Any = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm' )
UpperCAmelCase_ : Any = AutoProcessor.from_pretrained('hf-internal-testing/processor_with_lm' )
UpperCAmelCase_ : Union[str, Any] = floats_list((3, 1_0_0_0) )
UpperCAmelCase_ : Tuple = processor_wavaveca(_UpperCamelCase , return_tensors='np' )
UpperCAmelCase_ : List[str] = processor_auto(_UpperCamelCase , return_tensors='np' )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1E-2 )
UpperCAmelCase_ : Optional[Any] = self._get_dummy_logits()
UpperCAmelCase_ : List[Any] = processor_wavaveca.batch_decode(_UpperCamelCase )
UpperCAmelCase_ : List[str] = processor_auto.batch_decode(_UpperCamelCase )
self.assertListEqual(decoded_wavaveca.text , decoded_auto.text )
def __UpperCAmelCase ( self ) -> int:
UpperCAmelCase_ : str = self.get_feature_extractor()
UpperCAmelCase_ : Any = self.get_tokenizer()
UpperCAmelCase_ : List[str] = self.get_decoder()
UpperCAmelCase_ : Dict = WavaVecaProcessorWithLM(tokenizer=_UpperCamelCase , feature_extractor=_UpperCamelCase , decoder=_UpperCamelCase )
self.assertListEqual(
processor.model_input_names , feature_extractor.model_input_names , msg='`processor` and `feature_extractor` model input names do not match' , )
@staticmethod
def __UpperCAmelCase ( _UpperCamelCase , _UpperCamelCase ) -> str:
UpperCAmelCase_ : Optional[int] = [d[key] for d in offsets]
return retrieved_list
def __UpperCAmelCase ( self ) -> int:
UpperCAmelCase_ : List[str] = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm' )
UpperCAmelCase_ : Tuple = self._get_dummy_logits()[0]
UpperCAmelCase_ : Union[str, Any] = processor.decode(_UpperCamelCase , output_word_offsets=_UpperCamelCase )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue('text' in outputs )
self.assertTrue('word_offsets' in outputs )
self.assertTrue(isinstance(_UpperCamelCase , _UpperCamelCase ) )
self.assertEqual(' '.join(self.get_from_offsets(outputs['word_offsets'] , 'word' ) ) , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'] , 'word' ) , ['<s>', '<s>', '</s>'] )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'] , 'start_offset' ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'] , 'end_offset' ) , [1, 3, 5] )
def __UpperCAmelCase ( self ) -> Optional[int]:
UpperCAmelCase_ : Union[str, Any] = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm' )
UpperCAmelCase_ : List[Any] = self._get_dummy_logits()
UpperCAmelCase_ : Any = processor.batch_decode(_UpperCamelCase , output_word_offsets=_UpperCamelCase )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue('text' in outputs )
self.assertTrue('word_offsets' in outputs )
self.assertTrue(isinstance(_UpperCamelCase , _UpperCamelCase ) )
self.assertListEqual(
[' '.join(self.get_from_offsets(_UpperCamelCase , 'word' ) ) for o in outputs['word_offsets']] , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'][0] , 'word' ) , ['<s>', '<s>', '</s>'] )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'][0] , 'start_offset' ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'][0] , 'end_offset' ) , [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def __UpperCAmelCase ( self ) -> List[str]:
import torch
UpperCAmelCase_ : Optional[Any] = load_dataset('common_voice' , 'en' , split='train' , streaming=_UpperCamelCase )
UpperCAmelCase_ : Dict = ds.cast_column('audio' , datasets.Audio(sampling_rate=1_6_0_0_0 ) )
UpperCAmelCase_ : Any = iter(_UpperCamelCase )
UpperCAmelCase_ : int = next(_UpperCamelCase )
UpperCAmelCase_ : List[str] = AutoProcessor.from_pretrained('patrickvonplaten/wav2vec2-base-100h-with-lm' )
UpperCAmelCase_ : List[Any] = WavaVecaForCTC.from_pretrained('patrickvonplaten/wav2vec2-base-100h-with-lm' )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
UpperCAmelCase_ : Dict = processor(sample['audio']['array'] , return_tensors='pt' ).input_values
with torch.no_grad():
UpperCAmelCase_ : Optional[int] = model(_UpperCamelCase ).logits.cpu().numpy()
UpperCAmelCase_ : List[Any] = processor.decode(logits[0] , output_word_offsets=_UpperCamelCase )
UpperCAmelCase_ : List[str] = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
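# context (assumption: wav2vec2-base, whose inputs_to_logits_ratio is 320): at a
# 16 kHz sampling rate each logit frame spans 320 / 16000 = 0.02 s, so
# multiplying the integer offsets by time_offset converts them to seconds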
UpperCAmelCase_ : Tuple = [
{
'start_time': d['start_offset'] * time_offset,
'end_time': d['end_offset'] * time_offset,
'word': d['word'],
}
for d in output['word_offsets']
]
UpperCAmelCase_ : List[str] = 'WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL'
# output words
self.assertEqual(' '.join(self.get_from_offsets(_UpperCamelCase , 'word' ) ) , _UpperCamelCase )
self.assertEqual(' '.join(self.get_from_offsets(_UpperCamelCase , 'word' ) ) , output.text )
# output times
UpperCAmelCase_ : Tuple = torch.tensor(self.get_from_offsets(_UpperCamelCase , 'start_time' ) )
UpperCAmelCase_ : List[Any] = torch.tensor(self.get_from_offsets(_UpperCamelCase , 'end_time' ) )
# fmt: off
UpperCAmelCase_ : Tuple = torch.tensor([1.41_99, 1.65_99, 2.25_99, 3.0, 3.24, 3.59_99, 3.79_99, 4.09_99, 4.26, 4.94, 5.28, 5.65_99, 5.78, 5.94, 6.32, 6.53_99, 6.65_99] )
UpperCAmelCase_ : Optional[int] = torch.tensor([1.53_99, 1.89_99, 2.9, 3.16, 3.53_99, 3.72, 4.01_99, 4.17_99, 4.76, 5.15_99, 5.55_99, 5.69_99, 5.86, 6.19_99, 6.38, 6.61_99, 6.94] )
# fmt: on
self.assertTrue(torch.allclose(_UpperCamelCase , _UpperCamelCase , atol=0.01 ) )
self.assertTrue(torch.allclose(_UpperCamelCase , _UpperCamelCase , atol=0.01 ) )
| 29 |
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .attention_processor import AttentionProcessor, AttnProcessor
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder
@dataclass
class lowerCamelCase (_snake_case ):
'''simple docstring'''
_snake_case : "DiagonalGaussianDistribution"
class lowerCamelCase (_snake_case , _snake_case ):
'''simple docstring'''
_snake_case : Optional[int] = True
@register_to_config
def __init__( self , _UpperCamelCase = 3 , _UpperCamelCase = 3 , _UpperCamelCase = ("DownEncoderBlock2D",) , _UpperCamelCase = ("UpDecoderBlock2D",) , _UpperCamelCase = (6_4,) , _UpperCamelCase = 1 , _UpperCamelCase = "silu" , _UpperCamelCase = 4 , _UpperCamelCase = 3_2 , _UpperCamelCase = 3_2 , _UpperCamelCase = 0.1_82_15 , ) -> List[Any]:
super().__init__()
# pass init params to Encoder
UpperCAmelCase_ : List[str] = Encoder(
in_channels=_UpperCamelCase , out_channels=_UpperCamelCase , down_block_types=_UpperCamelCase , block_out_channels=_UpperCamelCase , layers_per_block=_UpperCamelCase , act_fn=_UpperCamelCase , norm_num_groups=_UpperCamelCase , double_z=_UpperCamelCase , )
# pass init params to Decoder
UpperCAmelCase_ : Dict = Decoder(
in_channels=_UpperCamelCase , out_channels=_UpperCamelCase , up_block_types=_UpperCamelCase , block_out_channels=_UpperCamelCase , layers_per_block=_UpperCamelCase , norm_num_groups=_UpperCamelCase , act_fn=_UpperCamelCase , )
UpperCAmelCase_ : Any = nn.Convad(2 * latent_channels , 2 * latent_channels , 1 )
UpperCAmelCase_ : List[Any] = nn.Convad(_UpperCamelCase , _UpperCamelCase , 1 )
UpperCAmelCase_ : Any = False
UpperCAmelCase_ : int = False
# only relevant if vae tiling is enabled
UpperCAmelCase_ : Optional[int] = self.config.sample_size
UpperCAmelCase_ : int = (
self.config.sample_size[0]
if isinstance(self.config.sample_size , (list, tuple) )
else self.config.sample_size
)
UpperCAmelCase_ : Union[str, Any] = int(sample_size / (2 ** (len(self.config.block_out_channels ) - 1)) )
UpperCAmelCase_ : Optional[Any] = 0.25
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase=False ) -> List[str]:
if isinstance(_UpperCamelCase , (Encoder, Decoder) ):
UpperCAmelCase_ : Union[str, Any] = value
def __UpperCAmelCase ( self , _UpperCamelCase = True ) -> int:
UpperCAmelCase_ : Tuple = use_tiling
def __UpperCAmelCase ( self ) -> Dict:
self.enable_tiling(_UpperCamelCase )
def __UpperCAmelCase ( self ) -> Optional[Any]:
UpperCAmelCase_ : str = True
def __UpperCAmelCase ( self ) -> List[Any]:
UpperCAmelCase_ : Optional[int] = False
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def __UpperCAmelCase ( self ) -> Dict[str, AttentionProcessor]:
UpperCAmelCase_ : Optional[int] = {}
def fn_recursive_add_processors(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
if hasattr(_UpperCamelCase , 'set_processor' ):
UpperCAmelCase_ : Optional[int] = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(f"{name}.{sub_name}" , _UpperCamelCase , _UpperCamelCase )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
return processors
def __UpperCAmelCase ( self , _UpperCamelCase ) -> List[Any]:
UpperCAmelCase_ : Union[str, Any] = len(self.attn_processors.keys() )
if isinstance(_UpperCamelCase , _UpperCamelCase ) and len(_UpperCamelCase ) != count:
raise ValueError(
f"A dict of processors was passed, but the number of processors {len(_UpperCamelCase )} does not match the"
f" number of attention layers: {count}. Please make sure to pass {count} processor classes." )
def fn_recursive_attn_processor(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
if hasattr(_UpperCamelCase , 'set_processor' ):
if not isinstance(_UpperCamelCase , _UpperCamelCase ):
module.set_processor(_UpperCamelCase )
else:
module.set_processor(processor.pop(f"{name}.processor" ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(f"{name}.{sub_name}" , _UpperCamelCase , _UpperCamelCase )
for name, module in self.named_children():
fn_recursive_attn_processor(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
def __UpperCAmelCase ( self ) -> Union[str, Any]:
self.set_attn_processor(AttnProcessor() )
@apply_forward_hook
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = True ) -> AutoencoderKLOutput:
if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
return self.tiled_encode(_UpperCamelCase , return_dict=_UpperCamelCase )
if self.use_slicing and x.shape[0] > 1:
UpperCAmelCase_ : Union[str, Any] = [self.encoder(_UpperCamelCase ) for x_slice in x.split(1 )]
UpperCAmelCase_ : Tuple = torch.cat(_UpperCamelCase )
else:
UpperCAmelCase_ : List[Any] = self.encoder(_UpperCamelCase )
UpperCAmelCase_ : Optional[Any] = self.quant_conv(_UpperCamelCase )
UpperCAmelCase_ : Tuple = DiagonalGaussianDistribution(_UpperCamelCase )
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=_UpperCamelCase )
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = True ) -> Union[DecoderOutput, torch.FloatTensor]:
if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
return self.tiled_decode(_UpperCamelCase , return_dict=_UpperCamelCase )
UpperCAmelCase_ : str = self.post_quant_conv(_UpperCamelCase )
UpperCAmelCase_ : List[str] = self.decoder(_UpperCamelCase )
if not return_dict:
return (dec,)
return DecoderOutput(sample=_UpperCamelCase )
@apply_forward_hook
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = True ) -> Union[DecoderOutput, torch.FloatTensor]:
if self.use_slicing and z.shape[0] > 1:
UpperCAmelCase_ : List[str] = [self._decode(_UpperCamelCase ).sample for z_slice in z.split(1 )]
UpperCAmelCase_ : Dict = torch.cat(_UpperCamelCase )
else:
UpperCAmelCase_ : Any = self._decode(_UpperCamelCase ).sample
if not return_dict:
return (decoded,)
return DecoderOutput(sample=_UpperCamelCase )
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Any:
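# vertical seam blend: linearly ramp tile b's top rows from tile a's bottom
# rows so that vertically adjacent tiles transition without a visible seam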
UpperCAmelCase_ : Tuple = min(a.shape[2] , b.shape[2] , _UpperCamelCase )
for y in range(_UpperCamelCase ):
UpperCAmelCase_ : str = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
return b
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Dict:
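# horizontal seam blend: the same linear ramp, applied across tile b's
# leftmost columns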
UpperCAmelCase_ : Tuple = min(a.shape[3] , b.shape[3] , _UpperCamelCase )
for x in range(_UpperCamelCase ):
UpperCAmelCase_ : int = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
return b
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = True ) -> AutoencoderKLOutput:
UpperCAmelCase_ : Any = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor) )
UpperCAmelCase_ : Tuple = int(self.tile_latent_min_size * self.tile_overlap_factor )
UpperCAmelCase_ : Optional[int] = self.tile_latent_min_size - blend_extent
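# illustrative arithmetic (assuming the common SD VAE sizes of a 512-px sample
# tile and a 64-latent tile at overlap 0.25): overlap_size = 384 px of stride,
# blend_extent = 16 latent rows, row_limit = 64 - 16 = 48 rows kept per tile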
# Split the image into 512x512 tiles and encode them separately.
UpperCAmelCase_ : List[str] = []
for i in range(0 , x.shape[2] , _UpperCamelCase ):
UpperCAmelCase_ : Any = []
for j in range(0 , x.shape[3] , _UpperCamelCase ):
UpperCAmelCase_ : Any = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
UpperCAmelCase_ : Dict = self.encoder(_UpperCamelCase )
UpperCAmelCase_ : List[str] = self.quant_conv(_UpperCamelCase )
row.append(_UpperCamelCase )
rows.append(_UpperCamelCase )
UpperCAmelCase_ : str = []
for i, row in enumerate(_UpperCamelCase ):
UpperCAmelCase_ : List[Any] = []
for j, tile in enumerate(_UpperCamelCase ):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
UpperCAmelCase_ : Dict = self.blend_v(rows[i - 1][j] , _UpperCamelCase , _UpperCamelCase )
if j > 0:
UpperCAmelCase_ : List[str] = self.blend_h(row[j - 1] , _UpperCamelCase , _UpperCamelCase )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(_UpperCamelCase , dim=3 ) )
UpperCAmelCase_ : Union[str, Any] = torch.cat(_UpperCamelCase , dim=2 )
UpperCAmelCase_ : List[Any] = DiagonalGaussianDistribution(_UpperCamelCase )
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=_UpperCamelCase )
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = True ) -> Union[DecoderOutput, torch.FloatTensor]:
UpperCAmelCase_ : str = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor) )
UpperCAmelCase_ : Dict = int(self.tile_sample_min_size * self.tile_overlap_factor )
UpperCAmelCase_ : Dict = self.tile_sample_min_size - blend_extent
# Split z into overlapping 64x64 tiles and decode them separately.
# The tiles have an overlap to avoid seams between tiles.
UpperCAmelCase_ : Union[str, Any] = []
for i in range(0 , z.shape[2] , _UpperCamelCase ):
UpperCAmelCase_ : List[str] = []
for j in range(0 , z.shape[3] , _UpperCamelCase ):
UpperCAmelCase_ : List[str] = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
UpperCAmelCase_ : Optional[Any] = self.post_quant_conv(_UpperCamelCase )
UpperCAmelCase_ : Tuple = self.decoder(_UpperCamelCase )
row.append(_UpperCamelCase )
rows.append(_UpperCamelCase )
UpperCAmelCase_ : Optional[Any] = []
for i, row in enumerate(_UpperCamelCase ):
UpperCAmelCase_ : List[Any] = []
for j, tile in enumerate(_UpperCamelCase ):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
UpperCAmelCase_ : Union[str, Any] = self.blend_v(rows[i - 1][j] , _UpperCamelCase , _UpperCamelCase )
if j > 0:
UpperCAmelCase_ : Optional[Any] = self.blend_h(row[j - 1] , _UpperCamelCase , _UpperCamelCase )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(_UpperCamelCase , dim=3 ) )
UpperCAmelCase_ : Dict = torch.cat(_UpperCamelCase , dim=2 )
if not return_dict:
return (dec,)
return DecoderOutput(sample=_UpperCamelCase )
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = False , _UpperCamelCase = True , _UpperCamelCase = None , ) -> Union[DecoderOutput, torch.FloatTensor]:
UpperCAmelCase_ : Optional[Any] = sample
UpperCAmelCase_ : Union[str, Any] = self.encode(_UpperCamelCase ).latent_dist
if sample_posterior:
UpperCAmelCase_ : str = posterior.sample(generator=_UpperCamelCase )
else:
UpperCAmelCase_ : int = posterior.mode()
UpperCAmelCase_ : Dict = self.decode(_UpperCamelCase ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=_UpperCamelCase )
| 29 | 1 |
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
__UpperCAmelCase = '\\n@INPROCEEDINGS{Papineni02bleu:a,\n author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},\n title = {BLEU: a Method for Automatic Evaluation of Machine Translation},\n booktitle = {},\n year = {2002},\n pages = {311--318}\n}\n@inproceedings{lin-och-2004-orange,\n title = "{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation",\n author = "Lin, Chin-Yew and\n Och, Franz Josef",\n booktitle = "{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics",\n month = "aug 23{--}aug 27",\n year = "2004",\n address = "Geneva, Switzerland",\n publisher = "COLING",\n url = "https://www.aclweb.org/anthology/C04-1072",\n pages = "501--507",\n}\n'
__UpperCAmelCase = '\\nBLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.\nQuality is considered to be the correspondence between a machine\'s output and that of a human: "the closer a machine translation is to a professional human translation,\nthe better it is" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and\nremains one of the most popular automated and inexpensive metrics.\n\nScores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.\nThose scores are then averaged over the whole corpus to reach an estimate of the translation\'s overall quality. Intelligibility or grammatical correctness\nare not taken into account[citation needed].\n\nBLEU\'s output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1\nrepresenting more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the\nreference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional\nreference translations will increase the BLEU score.\n'
__UpperCAmelCase = '\nComputes BLEU score of translated segments against one or more references.\nArgs:\n predictions: list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n max_order: Maximum n-gram order to use when computing BLEU score.\n smooth: Whether or not to apply Lin et al. 2004 smoothing.\nReturns:\n \'bleu\': bleu score,\n \'precisions\': geometric mean of n-gram precisions,\n \'brevity_penalty\': brevity penalty,\n \'length_ratio\': ratio of lengths,\n \'translation_length\': translation_length,\n \'reference_length\': reference_length\nExamples:\n\n >>> predictions = [\n ... ["hello", "there", "general", "kenobi"], # tokenized prediction of the first sample\n ... ["foo", "bar", "foobar"] # tokenized prediction of the second sample\n ... ]\n >>> references = [\n ... [["hello", "there", "general", "kenobi"], ["hello", "there", "!"]], # tokenized references for the first sample (2 references)\n ... [["foo", "bar", "foobar"]] # tokenized references for the second sample (1 reference)\n ... ]\n >>> bleu = datasets.load_metric("bleu")\n >>> results = bleu.compute(predictions=predictions, references=references)\n >>> print(results["bleu"])\n 1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCamelCase (datasets.Metric ):
'''simple docstring'''
def __UpperCAmelCase ( self ) -> Tuple:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ),
'references': datasets.Sequence(
datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ) , id='references' ),
} ) , codebase_urls=['https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py'] , reference_urls=[
'https://en.wikipedia.org/wiki/BLEU',
'https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213',
] , )
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=4 , _UpperCamelCase=False ) -> Union[str, Any]:
UpperCAmelCase_ : List[str] = compute_bleu(
reference_corpus=_UpperCamelCase , translation_corpus=_UpperCamelCase , max_order=_UpperCamelCase , smooth=_UpperCamelCase )
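        # compute_bleu returns a 6-tuple: (bleu, precisions, brevity_penalty, length_ratio, translation_length, reference_length)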
((UpperCAmelCase_) , (UpperCAmelCase_) , (UpperCAmelCase_) , (UpperCAmelCase_) , (UpperCAmelCase_) , (UpperCAmelCase_)) : Tuple = score
return {
"bleu": bleu,
"precisions": precisions,
"brevity_penalty": bp,
"length_ratio": ratio,
"translation_length": translation_length,
"reference_length": reference_length,
}
| 29 |
def lowercase__ ( __snake_case : int , __snake_case : int ):
'''simple docstring'''
if a < 0 or b < 0:
        raise ValueError('the value of both inputs must be non-negative' )
UpperCAmelCase_ : Tuple = str(bin(__snake_case ) )[2:] # remove the leading "0b"
UpperCAmelCase_ : Union[str, Any] = str(bin(__snake_case ) )[2:] # remove the leading "0b"
UpperCAmelCase_ : List[Any] = max(len(__snake_case ) , len(__snake_case ) )
return "0b" + "".join(
str(int(char_a == '1' and char_b == '1' ) )
for char_a, char_b in zip(a_binary.zfill(__snake_case ) , b_binary.zfill(__snake_case ) ) )
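# Worked example: binary_and(13, 11)
#   13 -> '1101', 11 -> '1011'; both are zero-filled to equal length and
#   ANDed digit by digit, giving '0b1001'.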
if __name__ == "__main__":
import doctest
doctest.testmod()
| 29 | 1 |
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def lowercase__ ( __snake_case : List[Any] , __snake_case : List[str]=False ):
'''simple docstring'''
try:
UpperCAmelCase_ : int = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
UpperCAmelCase_ : Optional[int] = default
else:
# KEY is set, convert it to True or False.
try:
UpperCAmelCase_ : List[Any] = strtobool(__snake_case )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(F"If set, {key} must be yes or no." )
return _value
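# gate slow tests behind the RUN_SLOW environment variable (off by default)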
__UpperCAmelCase = parse_flag_from_env('RUN_SLOW', default=False)
def lowercase__ ( __snake_case : int ):
'''simple docstring'''
return unittest.skip('Test was skipped' )(__snake_case )
def lowercase__ ( __snake_case : Tuple ):
'''simple docstring'''
return unittest.skipUnless(_run_slow_tests , 'test is slow' )(__snake_case )
def lowercase__ ( __snake_case : List[str] ):
'''simple docstring'''
return unittest.skipUnless(not torch.cuda.is_available() , 'test requires only a CPU' )(__snake_case )
def lowercase__ ( __snake_case : Tuple ):
'''simple docstring'''
return unittest.skipUnless(torch.cuda.is_available() , 'test requires a GPU' )(__snake_case )
def lowercase__ ( __snake_case : List[str] ):
'''simple docstring'''
return unittest.skipUnless(is_xpu_available() , 'test requires a XPU' )(__snake_case )
def lowercase__ ( __snake_case : str ):
'''simple docstring'''
return unittest.skipUnless(is_mps_available() , 'test requires a `mps` backend support in `torch`' )(__snake_case )
def lowercase__ ( __snake_case : Tuple ):
'''simple docstring'''
return unittest.skipUnless(
is_transformers_available() and is_datasets_available() , 'test requires the Hugging Face suite' )(__snake_case )
def lowercase__ ( __snake_case : str ):
'''simple docstring'''
return unittest.skipUnless(is_bnb_available() , 'test requires the bitsandbytes library' )(__snake_case )
def lowercase__ ( __snake_case : Dict ):
'''simple docstring'''
return unittest.skipUnless(is_tpu_available() , 'test requires TPU' )(__snake_case )
def lowercase__ ( __snake_case : Tuple ):
'''simple docstring'''
return unittest.skipUnless(torch.cuda.device_count() == 1 , 'test requires a GPU' )(__snake_case )
def lowercase__ ( __snake_case : Dict ):
'''simple docstring'''
return unittest.skipUnless(torch.xpu.device_count() == 1 , 'test requires a XPU' )(__snake_case )
def lowercase__ ( __snake_case : Optional[int] ):
'''simple docstring'''
return unittest.skipUnless(torch.cuda.device_count() > 1 , 'test requires multiple GPUs' )(__snake_case )
def lowercase__ ( __snake_case : int ):
'''simple docstring'''
return unittest.skipUnless(torch.xpu.device_count() > 1 , 'test requires multiple XPUs' )(__snake_case )
def lowercase__ ( __snake_case : Dict ):
'''simple docstring'''
return unittest.skipUnless(is_safetensors_available() , 'test requires safetensors' )(__snake_case )
def lowercase__ ( __snake_case : Tuple ):
'''simple docstring'''
return unittest.skipUnless(is_deepspeed_available() , 'test requires DeepSpeed' )(__snake_case )
def lowercase__ ( __snake_case : List[Any] ):
'''simple docstring'''
return unittest.skipUnless(is_torch_version('>=' , '1.12.0' ) , 'test requires torch version >= 1.12.0' )(__snake_case )
def lowercase__ ( __snake_case : Dict=None , __snake_case : Dict=None ):
'''simple docstring'''
if test_case is None:
return partial(__snake_case , version=__snake_case )
return unittest.skipUnless(is_torch_version('>=' , __snake_case ) , F"test requires torch version >= {version}" )(__snake_case )
def lowercase__ ( __snake_case : str ):
'''simple docstring'''
return unittest.skipUnless(is_tensorboard_available() , 'test requires Tensorboard' )(__snake_case )
def lowercase__ ( __snake_case : List[str] ):
'''simple docstring'''
return unittest.skipUnless(is_wandb_available() , 'test requires wandb' )(__snake_case )
def lowercase__ ( __snake_case : str ):
'''simple docstring'''
return unittest.skipUnless(is_comet_ml_available() , 'test requires comet_ml' )(__snake_case )
__UpperCAmelCase = (
any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)
def lowercase__ ( __snake_case : List[Any] ):
'''simple docstring'''
return unittest.skipUnless(
_atleast_one_tracker_available , 'test requires at least one tracker to be available and for `comet_ml` to not be installed' , )(__snake_case )
class lowerCamelCase (unittest.TestCase ):
'''simple docstring'''
_snake_case : Union[str, Any] = True
@classmethod
def __UpperCAmelCase ( cls ) -> Union[str, Any]:
UpperCAmelCase_ : List[Any] = tempfile.mkdtemp()
@classmethod
def __UpperCAmelCase ( cls ) -> List[str]:
if os.path.exists(cls.tmpdir ):
shutil.rmtree(cls.tmpdir )
def __UpperCAmelCase ( self ) -> str:
if self.clear_on_setup:
for path in Path(self.tmpdir ).glob('**/*' ):
if path.is_file():
path.unlink()
elif path.is_dir():
shutil.rmtree(_UpperCamelCase )
class lowerCamelCase (unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self ) -> Optional[int]:
super().tearDown()
# Reset the state of the AcceleratorState singleton.
AcceleratorState._reset_state()
PartialState._reset_state()
class lowerCamelCase (unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self , _UpperCamelCase ) -> Any:
UpperCAmelCase_ : List[Any] = mocks if isinstance(_UpperCamelCase , (tuple, list) ) else [mocks]
for m in self.mocks:
m.start()
self.addCleanup(m.stop )
def lowercase__ ( __snake_case : int ):
'''simple docstring'''
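    # gather a copy of the tensor from every process and verify each rank produced the same values as rank 0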
UpperCAmelCase_ : int = AcceleratorState()
UpperCAmelCase_ : str = tensor[None].clone().to(state.device )
UpperCAmelCase_ : List[str] = gather(__snake_case ).cpu()
UpperCAmelCase_ : List[Any] = tensor[0].cpu()
for i in range(tensors.shape[0] ):
if not torch.equal(tensors[i] , __snake_case ):
return False
return True
class lowerCamelCase :
'''simple docstring'''
def __init__( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Any:
UpperCAmelCase_ : str = returncode
UpperCAmelCase_ : Optional[Any] = stdout
UpperCAmelCase_ : Optional[Any] = stderr
async def lowercase__ ( __snake_case : Optional[Any] , __snake_case : Optional[int] ):
'''simple docstring'''
while True:
UpperCAmelCase_ : Dict = await stream.readline()
if line:
callback(__snake_case )
else:
break
async def lowercase__ ( __snake_case : Optional[int] , __snake_case : Dict=None , __snake_case : str=None , __snake_case : Dict=None , __snake_case : List[str]=False , __snake_case : Optional[int]=False ):
'''simple docstring'''
if echo:
print('\nRunning: ' , ' '.join(__snake_case ) )
UpperCAmelCase_ : Optional[Any] = await asyncio.create_subprocess_exec(
cmd[0] , *cmd[1:] , stdin=__snake_case , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=__snake_case , )
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
UpperCAmelCase_ : Any = []
UpperCAmelCase_ : str = []
def tee(__snake_case : Dict , __snake_case : Union[str, Any] , __snake_case : Tuple , __snake_case : Optional[int]="" ):
UpperCAmelCase_ : List[str] = line.decode('utf-8' ).rstrip()
sink.append(__snake_case )
if not quiet:
print(__snake_case , __snake_case , file=__snake_case )
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
asyncio.create_task(_read_stream(p.stdout , lambda __snake_case : tee(__snake_case , __snake_case , sys.stdout , label='stdout:' ) ) ),
asyncio.create_task(_read_stream(p.stderr , lambda __snake_case : tee(__snake_case , __snake_case , sys.stderr , label='stderr:' ) ) ),
] , timeout=__snake_case , )
return _RunOutput(await p.wait() , __snake_case , __snake_case )
def lowercase__ ( __snake_case : Optional[Any] , __snake_case : List[Any]=None , __snake_case : str=None , __snake_case : Tuple=180 , __snake_case : Dict=False , __snake_case : Optional[Any]=True ):
'''simple docstring'''
UpperCAmelCase_ : str = asyncio.get_event_loop()
UpperCAmelCase_ : int = loop.run_until_complete(
_stream_subprocess(__snake_case , env=__snake_case , stdin=__snake_case , timeout=__snake_case , quiet=__snake_case , echo=__snake_case ) )
UpperCAmelCase_ : int = ' '.join(__snake_case )
if result.returncode > 0:
UpperCAmelCase_ : int = '\n'.join(result.stderr )
raise RuntimeError(
F"'{cmd_str}' failed with returncode {result.returncode}\n\n"
F"The combined stderr from workers follows:\n{stderr}" )
return result
class lowerCamelCase (_snake_case ):
'''simple docstring'''
pass
def lowercase__ ( __snake_case : List[str] , __snake_case : List[Any]=False ):
'''simple docstring'''
try:
UpperCAmelCase_ : List[Any] = subprocess.check_output(__snake_case , stderr=subprocess.STDOUT )
if return_stdout:
if hasattr(__snake_case , 'decode' ):
UpperCAmelCase_ : str = output.decode('utf-8' )
return output
except subprocess.CalledProcessError as e:
raise SubprocessCallException(
F"Command `{' '.join(__snake_case )}` failed with the following error:\n\n{e.output.decode()}" ) from e
| 29 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {'vocab_file': 'vocab.txt'}
__UpperCAmelCase = {
'vocab_file': {
'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt',
'YituTech/conv-bert-medium-small': (
'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'
),
'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt',
}
}
__UpperCAmelCase = {
'YituTech/conv-bert-base': 512,
'YituTech/conv-bert-medium-small': 512,
'YituTech/conv-bert-small': 512,
}
__UpperCAmelCase = {
'YituTech/conv-bert-base': {'do_lower_case': True},
'YituTech/conv-bert-medium-small': {'do_lower_case': True},
'YituTech/conv-bert-small': {'do_lower_case': True},
}
class lowerCamelCase (_snake_case ):
'''simple docstring'''
_snake_case : Optional[int] = VOCAB_FILES_NAMES
_snake_case : int = PRETRAINED_VOCAB_FILES_MAP
_snake_case : Dict = PRETRAINED_INIT_CONFIGURATION
_snake_case : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_snake_case : Any = ConvBertTokenizer
def __init__( self , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase=True , _UpperCamelCase="[UNK]" , _UpperCamelCase="[SEP]" , _UpperCamelCase="[PAD]" , _UpperCamelCase="[CLS]" , _UpperCamelCase="[MASK]" , _UpperCamelCase=True , _UpperCamelCase=None , **_UpperCamelCase , ) -> Dict:
super().__init__(
_UpperCamelCase , tokenizer_file=_UpperCamelCase , do_lower_case=_UpperCamelCase , unk_token=_UpperCamelCase , sep_token=_UpperCamelCase , pad_token=_UpperCamelCase , cls_token=_UpperCamelCase , mask_token=_UpperCamelCase , tokenize_chinese_chars=_UpperCamelCase , strip_accents=_UpperCamelCase , **_UpperCamelCase , )
UpperCAmelCase_ : Optional[int] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
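        # rebuild the backend normalizer only if its serialized state disagrees with the requested options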
if (
normalizer_state.get('lowercase' , _UpperCamelCase ) != do_lower_case
or normalizer_state.get('strip_accents' , _UpperCamelCase ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , _UpperCamelCase ) != tokenize_chinese_chars
):
UpperCAmelCase_ : Any = getattr(_UpperCamelCase , normalizer_state.pop('type' ) )
UpperCAmelCase_ : str = do_lower_case
UpperCAmelCase_ : List[Any] = strip_accents
UpperCAmelCase_ : str = tokenize_chinese_chars
UpperCAmelCase_ : Tuple = normalizer_class(**_UpperCamelCase )
UpperCAmelCase_ : Any = do_lower_case
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase=None ) -> List[str]:
UpperCAmelCase_ : int = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = None ) -> List[int]:
UpperCAmelCase_ : Union[str, Any] = [self.sep_token_id]
UpperCAmelCase_ : int = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = None ) -> Tuple[str]:
UpperCAmelCase_ : Any = self._tokenizer.model.save(_UpperCamelCase , name=_UpperCamelCase )
return tuple(_UpperCamelCase )
| 29 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json',
}
class lowerCamelCase (_snake_case ):
'''simple docstring'''
_snake_case : Tuple = '''gpt_neox_japanese'''
def __init__( self , _UpperCamelCase=3_2_0_0_0 , _UpperCamelCase=2_5_6_0 , _UpperCamelCase=3_2 , _UpperCamelCase=3_2 , _UpperCamelCase=4 , _UpperCamelCase="gelu" , _UpperCamelCase=1.00 , _UpperCamelCase=1_0_0_0_0 , _UpperCamelCase=2_0_4_8 , _UpperCamelCase=0.02 , _UpperCamelCase=1E-5 , _UpperCamelCase=True , _UpperCamelCase=3_1_9_9_6 , _UpperCamelCase=3_1_9_9_9 , _UpperCamelCase=0.1 , _UpperCamelCase=0.0 , **_UpperCamelCase , ) -> str:
super().__init__(bos_token_id=_UpperCamelCase , eos_token_id=_UpperCamelCase , **_UpperCamelCase )
UpperCAmelCase_ : str = vocab_size
UpperCAmelCase_ : str = max_position_embeddings
UpperCAmelCase_ : int = hidden_size
UpperCAmelCase_ : List[Any] = num_hidden_layers
UpperCAmelCase_ : Dict = num_attention_heads
UpperCAmelCase_ : Tuple = intermediate_multiple_size
UpperCAmelCase_ : int = hidden_act
UpperCAmelCase_ : List[Any] = rotary_pct
UpperCAmelCase_ : Tuple = rotary_emb_base
UpperCAmelCase_ : int = initializer_range
UpperCAmelCase_ : List[str] = layer_norm_eps
UpperCAmelCase_ : Union[str, Any] = use_cache
UpperCAmelCase_ : Dict = attention_dropout
UpperCAmelCase_ : List[str] = hidden_dropout
| 29 |
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
'snap-research/efficientformer-l1-300': (
'https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json'
),
}
class lowerCamelCase (_snake_case ):
'''simple docstring'''
_snake_case : Optional[int] = '''efficientformer'''
def __init__( self , _UpperCamelCase = [3, 2, 6, 4] , _UpperCamelCase = [4_8, 9_6, 2_2_4, 4_4_8] , _UpperCamelCase = [True, True, True, True] , _UpperCamelCase = 4_4_8 , _UpperCamelCase = 3_2 , _UpperCamelCase = 4 , _UpperCamelCase = 7 , _UpperCamelCase = 5 , _UpperCamelCase = 8 , _UpperCamelCase = 4 , _UpperCamelCase = 0.0 , _UpperCamelCase = 1_6 , _UpperCamelCase = 3 , _UpperCamelCase = 3 , _UpperCamelCase = 3 , _UpperCamelCase = 2 , _UpperCamelCase = 1 , _UpperCamelCase = 0.0 , _UpperCamelCase = 1 , _UpperCamelCase = True , _UpperCamelCase = True , _UpperCamelCase = 1E-5 , _UpperCamelCase = "gelu" , _UpperCamelCase = 0.02 , _UpperCamelCase = 1E-12 , _UpperCamelCase = 2_2_4 , _UpperCamelCase = 1E-05 , **_UpperCamelCase , ) -> None:
super().__init__(**_UpperCamelCase )
UpperCAmelCase_ : int = hidden_act
UpperCAmelCase_ : Union[str, Any] = hidden_dropout_prob
UpperCAmelCase_ : Tuple = hidden_sizes
UpperCAmelCase_ : Union[str, Any] = num_hidden_layers
UpperCAmelCase_ : List[str] = num_attention_heads
UpperCAmelCase_ : List[Any] = initializer_range
UpperCAmelCase_ : int = layer_norm_eps
UpperCAmelCase_ : List[str] = patch_size
UpperCAmelCase_ : Union[str, Any] = num_channels
UpperCAmelCase_ : Optional[Any] = depths
UpperCAmelCase_ : List[Any] = mlp_expansion_ratio
UpperCAmelCase_ : List[str] = downsamples
UpperCAmelCase_ : List[Any] = dim
UpperCAmelCase_ : Tuple = key_dim
UpperCAmelCase_ : Optional[int] = attention_ratio
UpperCAmelCase_ : str = resolution
UpperCAmelCase_ : Dict = pool_size
UpperCAmelCase_ : Union[str, Any] = downsample_patch_size
UpperCAmelCase_ : List[str] = downsample_stride
UpperCAmelCase_ : List[str] = downsample_pad
UpperCAmelCase_ : Any = drop_path_rate
UpperCAmelCase_ : Dict = num_metaad_blocks
UpperCAmelCase_ : Dict = distillation
UpperCAmelCase_ : int = use_layer_scale
UpperCAmelCase_ : Any = layer_scale_init_value
UpperCAmelCase_ : Any = image_size
UpperCAmelCase_ : Dict = batch_norm_eps
| 29 | 1 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class lowerCamelCase (unittest.TestCase ):
'''simple docstring'''
_snake_case : str = ViTImageProcessor if is_vision_available() else None
@property
def __UpperCAmelCase ( self ) -> Optional[int]:
return self.image_processor_tester.prepare_image_processor_dict()
def __UpperCAmelCase ( self ) -> List[Any]:
UpperCAmelCase_ : str = (3, 3_2, 1_2_8)
UpperCAmelCase_ : int = tempfile.mkdtemp()
# fmt: off
UpperCAmelCase_ : int = ['[GO]', '[s]', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
# fmt: on
UpperCAmelCase_ : Optional[int] = dict(zip(_UpperCamelCase , range(len(_UpperCamelCase ) ) ) )
UpperCAmelCase_ : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(_UpperCamelCase ) + '\n' )
UpperCAmelCase_ : Dict = {
'do_normalize': False,
'do_resize': True,
'image_processor_type': 'ViTImageProcessor',
'resample': 3,
'size': {'height': 3_2, 'width': 1_2_8},
}
UpperCAmelCase_ : Dict = os.path.join(self.tmpdirname , _UpperCamelCase )
with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp:
json.dump(_UpperCamelCase , _UpperCamelCase )
def __UpperCAmelCase ( self , **_UpperCamelCase ) -> Dict:
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **_UpperCamelCase )
def __UpperCAmelCase ( self , **_UpperCamelCase ) -> List[Any]:
return ViTImageProcessor.from_pretrained(self.tmpdirname , **_UpperCamelCase )
def __UpperCAmelCase ( self ) -> Union[str, Any]:
shutil.rmtree(self.tmpdirname )
def __UpperCAmelCase ( self ) -> List[Any]:
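        # build a random 30x400 RGB image; np.moveaxis turns the channels-first array into HWC for PIL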
UpperCAmelCase_ : List[str] = np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )
UpperCAmelCase_ : Any = Image.fromarray(np.moveaxis(_UpperCamelCase , 0 , -1 ) )
return image_input
def __UpperCAmelCase ( self ) -> Tuple:
UpperCAmelCase_ : int = self.get_tokenizer()
UpperCAmelCase_ : int = self.get_image_processor()
UpperCAmelCase_ : Optional[Any] = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase )
processor.save_pretrained(self.tmpdirname )
UpperCAmelCase_ : int = MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=_UpperCamelCase )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , _UpperCamelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , _UpperCamelCase )
def __UpperCAmelCase ( self ) -> Optional[int]:
UpperCAmelCase_ : Optional[int] = self.get_tokenizer()
UpperCAmelCase_ : List[str] = self.get_image_processor()
UpperCAmelCase_ : Dict = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase )
processor.save_pretrained(self.tmpdirname )
UpperCAmelCase_ : str = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
UpperCAmelCase_ : Optional[int] = self.get_image_processor(do_normalize=_UpperCamelCase , padding_value=1.0 )
UpperCAmelCase_ : List[str] = MgpstrProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=_UpperCamelCase , padding_value=1.0 )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , _UpperCamelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _UpperCamelCase )
def __UpperCAmelCase ( self ) -> int:
UpperCAmelCase_ : Union[str, Any] = self.get_image_processor()
UpperCAmelCase_ : Tuple = self.get_tokenizer()
UpperCAmelCase_ : str = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase )
UpperCAmelCase_ : int = self.prepare_image_inputs()
UpperCAmelCase_ : List[Any] = image_processor(_UpperCamelCase , return_tensors='np' )
UpperCAmelCase_ : Tuple = processor(images=_UpperCamelCase , return_tensors='np' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def __UpperCAmelCase ( self ) -> Optional[Any]:
UpperCAmelCase_ : Dict = self.get_image_processor()
UpperCAmelCase_ : Union[str, Any] = self.get_tokenizer()
UpperCAmelCase_ : str = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase )
UpperCAmelCase_ : List[str] = 'test'
UpperCAmelCase_ : Union[str, Any] = processor(text=_UpperCamelCase )
UpperCAmelCase_ : Union[str, Any] = tokenizer(_UpperCamelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __UpperCAmelCase ( self ) -> Optional[Any]:
UpperCAmelCase_ : Tuple = self.get_image_processor()
UpperCAmelCase_ : Any = self.get_tokenizer()
UpperCAmelCase_ : Dict = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase )
UpperCAmelCase_ : str = 'test'
UpperCAmelCase_ : str = self.prepare_image_inputs()
UpperCAmelCase_ : str = processor(text=_UpperCamelCase , images=_UpperCamelCase )
self.assertListEqual(list(inputs.keys() ) , ['pixel_values', 'labels'] )
# test if it raises when no input is passed
with pytest.raises(_UpperCamelCase ):
processor()
def __UpperCAmelCase ( self ) -> List[Any]:
UpperCAmelCase_ : List[Any] = self.get_image_processor()
UpperCAmelCase_ : str = self.get_tokenizer()
UpperCAmelCase_ : Optional[int] = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase )
UpperCAmelCase_ : Optional[Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
UpperCAmelCase_ : Tuple = processor.char_decode(_UpperCamelCase )
UpperCAmelCase_ : Optional[Any] = tokenizer.batch_decode(_UpperCamelCase )
UpperCAmelCase_ : Dict = [seq.replace(' ' , '' ) for seq in decoded_tok]
self.assertListEqual(_UpperCamelCase , _UpperCamelCase )
def __UpperCAmelCase ( self ) -> Tuple:
UpperCAmelCase_ : Dict = self.get_image_processor()
UpperCAmelCase_ : str = self.get_tokenizer()
UpperCAmelCase_ : Tuple = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase )
UpperCAmelCase_ : List[str] = None
UpperCAmelCase_ : str = self.prepare_image_inputs()
UpperCAmelCase_ : Optional[int] = processor(text=_UpperCamelCase , images=_UpperCamelCase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
def __UpperCAmelCase ( self ) -> str:
UpperCAmelCase_ : Any = self.get_image_processor()
UpperCAmelCase_ : Optional[Any] = self.get_tokenizer()
UpperCAmelCase_ : Optional[int] = MgpstrProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase )
UpperCAmelCase_ : Optional[Any] = torch.randn(1 , 2_7 , 3_8 )
UpperCAmelCase_ : List[Any] = torch.randn(1 , 2_7 , 5_0_2_5_7 )
UpperCAmelCase_ : List[Any] = torch.randn(1 , 2_7 , 3_0_5_2_2 )
UpperCAmelCase_ : Any = processor.batch_decode([char_input, bpe_input, wp_input] )
self.assertListEqual(list(results.keys() ) , ['generated_text', 'scores', 'char_preds', 'bpe_preds', 'wp_preds'] )
| 29 |
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
__UpperCAmelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
__UpperCAmelCase = '\n Examples:\n ```py\n >>> from PIL import Image\n >>> import torch\n >>> from diffusers import DiffusionPipeline\n >>> from diffusers.utils import export_to_gif, load_image\n\n >>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")\n\n >>> repo = "openai/shap-e-img2img"\n >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)\n >>> pipe = pipe.to(device)\n\n >>> guidance_scale = 3.0\n >>> image_url = "https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png"\n >>> image = load_image(image_url).convert("RGB")\n\n >>> images = pipe(\n ... image,\n ... guidance_scale=guidance_scale,\n ... num_inference_steps=64,\n ... frame_size=256,\n ... ).images\n\n >>> gif_path = export_to_gif(images[0], "corgi_3d.gif")\n ```\n'
@dataclass
class lowerCamelCase (_snake_case ):
'''simple docstring'''
_snake_case : Union[PIL.Image.Image, np.ndarray]
class lowerCamelCase (_snake_case ):
'''simple docstring'''
def __init__( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , ) -> Any:
super().__init__()
self.register_modules(
prior=_UpperCamelCase , image_encoder=_UpperCamelCase , image_processor=_UpperCamelCase , scheduler=_UpperCamelCase , renderer=_UpperCamelCase , )
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> List[Any]:
if latents is None:
UpperCAmelCase_ : str = randn_tensor(_UpperCamelCase , generator=_UpperCamelCase , device=_UpperCamelCase , dtype=_UpperCamelCase )
else:
if latents.shape != shape:
raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}" )
UpperCAmelCase_ : Tuple = latents.to(_UpperCamelCase )
UpperCAmelCase_ : Tuple = latents * scheduler.init_noise_sigma
return latents
def __UpperCAmelCase ( self , _UpperCamelCase=0 ) -> Union[str, Any]:
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`' )
UpperCAmelCase_ : int = torch.device(f"cuda:{gpu_id}" )
UpperCAmelCase_ : int = [self.image_encoder, self.prior]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(_UpperCamelCase , _UpperCamelCase )
@property
def __UpperCAmelCase ( self ) -> int:
if self.device != torch.device('meta' ) or not hasattr(self.image_encoder , '_hf_hook' ):
return self.device
for module in self.image_encoder.modules():
if (
hasattr(_UpperCamelCase , '_hf_hook' )
and hasattr(module._hf_hook , 'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , ) -> str:
if isinstance(_UpperCamelCase , _UpperCamelCase ) and isinstance(image[0] , torch.Tensor ):
UpperCAmelCase_ : int = torch.cat(_UpperCamelCase , axis=0 ) if image[0].ndim == 4 else torch.stack(_UpperCamelCase , axis=0 )
if not isinstance(_UpperCamelCase , torch.Tensor ):
UpperCAmelCase_ : Optional[int] = self.image_processor(_UpperCamelCase , return_tensors='pt' ).pixel_values[0].unsqueeze(0 )
UpperCAmelCase_ : Tuple = image.to(dtype=self.image_encoder.dtype , device=_UpperCamelCase )
UpperCAmelCase_ : Optional[Any] = self.image_encoder(_UpperCamelCase )['last_hidden_state']
UpperCAmelCase_ : Union[str, Any] = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256
UpperCAmelCase_ : List[str] = image_embeds.repeat_interleave(_UpperCamelCase , dim=0 )
if do_classifier_free_guidance:
UpperCAmelCase_ : Dict = torch.zeros_like(_UpperCamelCase )
# For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and image embeddings into a single batch
# to avoid doing two forward passes
UpperCAmelCase_ : Optional[int] = torch.cat([negative_image_embeds, image_embeds] )
return image_embeds
@torch.no_grad()
@replace_example_docstring(_UpperCamelCase )
def __call__( self , _UpperCamelCase , _UpperCamelCase = 1 , _UpperCamelCase = 2_5 , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = 4.0 , _UpperCamelCase = 6_4 , _UpperCamelCase = "pil" , _UpperCamelCase = True , ) -> Union[str, Any]:
if isinstance(_UpperCamelCase , PIL.Image.Image ):
UpperCAmelCase_ : Tuple = 1
elif isinstance(_UpperCamelCase , torch.Tensor ):
UpperCAmelCase_ : str = image.shape[0]
elif isinstance(_UpperCamelCase , _UpperCamelCase ) and isinstance(image[0] , (torch.Tensor, PIL.Image.Image) ):
UpperCAmelCase_ : Optional[int] = len(_UpperCamelCase )
else:
raise ValueError(
f"`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(_UpperCamelCase )}" )
UpperCAmelCase_ : Tuple = self._execution_device
UpperCAmelCase_ : str = batch_size * num_images_per_prompt
UpperCAmelCase_ : str = guidance_scale > 1.0
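        # guidance_scale > 1.0 turns on classifier-free guidance, so the encoder below returns doubled embeddings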
UpperCAmelCase_ : str = self._encode_image(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# prior
self.scheduler.set_timesteps(_UpperCamelCase , device=_UpperCamelCase )
UpperCAmelCase_ : int = self.scheduler.timesteps
UpperCAmelCase_ : int = self.prior.config.num_embeddings
UpperCAmelCase_ : Any = self.prior.config.embedding_dim
UpperCAmelCase_ : List[str] = self.prepare_latents(
(batch_size, num_embeddings * embedding_dim) , image_embeds.dtype , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , self.scheduler , )
# YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
UpperCAmelCase_ : List[Any] = latents.reshape(latents.shape[0] , _UpperCamelCase , _UpperCamelCase )
for i, t in enumerate(self.progress_bar(_UpperCamelCase ) ):
# expand the latents if we are doing classifier free guidance
UpperCAmelCase_ : Tuple = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
UpperCAmelCase_ : Optional[Any] = self.scheduler.scale_model_input(_UpperCamelCase , _UpperCamelCase )
UpperCAmelCase_ : int = self.prior(
_UpperCamelCase , timestep=_UpperCamelCase , proj_embedding=_UpperCamelCase , ).predicted_image_embedding
# remove the variance
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = noise_pred.split(
scaled_model_input.shape[2] , dim=2 ) # batch_size, num_embeddings, embedding_dim
            if do_classifier_free_guidance:
UpperCAmelCase_ , UpperCAmelCase_ : str = noise_pred.chunk(2 )
UpperCAmelCase_ : List[Any] = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
UpperCAmelCase_ : List[str] = self.scheduler.step(
_UpperCamelCase , timestep=_UpperCamelCase , sample=_UpperCamelCase , ).prev_sample
if output_type == "latent":
return ShapEPipelineOutput(images=_UpperCamelCase )
UpperCAmelCase_ : List[Any] = []
for i, latent in enumerate(_UpperCamelCase ):
UpperCAmelCase_ : List[str] = self.renderer.decode(
latent[None, :] , _UpperCamelCase , size=_UpperCamelCase , ray_batch_size=4_0_9_6 , n_coarse_samples=6_4 , n_fine_samples=1_2_8 , )
images.append(_UpperCamelCase )
UpperCAmelCase_ : Optional[int] = torch.stack(_UpperCamelCase )
if output_type not in ["np", "pil"]:
            raise ValueError(f"Only the output types `pil` and `np` are supported, not output_type={output_type}" )
UpperCAmelCase_ : Dict = images.cpu().numpy()
if output_type == "pil":
UpperCAmelCase_ : List[str] = [self.numpy_to_pil(_UpperCamelCase ) for image in images]
# Offload last model to CPU
if hasattr(self , 'final_offload_hook' ) and self.final_offload_hook is not None:
self.final_offload_hook.offload()
if not return_dict:
return (images,)
return ShapEPipelineOutput(images=_UpperCamelCase )
| 29 | 1 |
def lowercase__ ( __snake_case : int , __snake_case : int ):
'''simple docstring'''
    UpperCAmelCase_ : Union[str, Any] = 1  # to keep the calculated value
# Since C(n, k) = C(n, n-k)
if k > (n - k):
UpperCAmelCase_ : Optional[Any] = n - k
# Calculate C(n,k)
for i in range(__snake_case ):
result *= n - i
result //= i + 1
return result
def lowercase__ ( __snake_case : int ):
'''simple docstring'''
return binomial_coefficient(2 * node_count , __snake_case ) // (node_count + 1)
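# e.g. catalan_number(3) = binomial_coefficient(6, 3) // 4 = 20 // 4 = 5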
def lowercase__ ( __snake_case : int ):
'''simple docstring'''
if n < 0:
raise ValueError('factorial() not defined for negative values' )
UpperCAmelCase_ : str = 1
for i in range(1 , n + 1 ):
result *= i
return result
def lowercase__ ( __snake_case : int ):
'''simple docstring'''
return catalan_number(__snake_case ) * factorial(__snake_case )
if __name__ == "__main__":
__UpperCAmelCase = int(input('Enter the number of nodes: ').strip() or 0)
if node_count <= 0:
raise ValueError('We need some nodes to work with.')
print(
F'Given {node_count} nodes, there are {binary_tree_count(node_count)} '
F'binary trees and {catalan_number(node_count)} binary search trees.'
)
| 29 |
import random
import unittest
import torch
from diffusers import IFImgaImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class lowerCamelCase (_snake_case , _snake_case , unittest.TestCase ):
'''simple docstring'''
_snake_case : Union[str, Any] = IFImgaImgSuperResolutionPipeline
_snake_case : Union[str, Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''width''', '''height'''}
_snake_case : List[Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'''original_image'''} )
_snake_case : List[str] = PipelineTesterMixin.required_optional_params - {'''latents'''}
def __UpperCAmelCase ( self ) -> Optional[Any]:
return self._get_superresolution_dummy_components()
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase=0 ) -> Any:
if str(_UpperCamelCase ).startswith('mps' ):
UpperCAmelCase_ : List[Any] = torch.manual_seed(_UpperCamelCase )
else:
UpperCAmelCase_ : int = torch.Generator(device=_UpperCamelCase ).manual_seed(_UpperCamelCase )
UpperCAmelCase_ : List[Any] = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(_UpperCamelCase ) ).to(_UpperCamelCase )
UpperCAmelCase_ : Dict = floats_tensor((1, 3, 1_6, 1_6) , rng=random.Random(_UpperCamelCase ) ).to(_UpperCamelCase )
UpperCAmelCase_ : Tuple = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'original_image': original_image,
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def __UpperCAmelCase ( self ) -> Any:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def __UpperCAmelCase ( self ) -> Dict:
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' )
def __UpperCAmelCase ( self ) -> str:
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1E-1 )
def __UpperCAmelCase ( self ) -> List[Any]:
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def __UpperCAmelCase ( self ) -> Union[str, Any]:
self._test_save_load_local()
def __UpperCAmelCase ( self ) -> Dict:
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
| 29 | 1 |
import gc
import unittest
import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
ClapTextConfig,
ClapTextModelWithProjection,
RobertaTokenizer,
SpeechTaHifiGan,
SpeechTaHifiGanConfig,
)
from diffusers import (
AudioLDMPipeline,
AutoencoderKL,
DDIMScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class lowerCamelCase (_snake_case , unittest.TestCase ):
'''simple docstring'''
_snake_case : Optional[Any] = AudioLDMPipeline
_snake_case : List[Any] = TEXT_TO_AUDIO_PARAMS
_snake_case : Dict = TEXT_TO_AUDIO_BATCH_PARAMS
_snake_case : int = frozenset(
[
'''num_inference_steps''',
'''num_waveforms_per_prompt''',
'''generator''',
'''latents''',
'''output_type''',
'''return_dict''',
'''callback''',
'''callback_steps''',
] )
def __UpperCAmelCase ( self ) -> str:
torch.manual_seed(0 )
UpperCAmelCase_ : Dict = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=(3_2, 6_4) , class_embed_type='simple_projection' , projection_class_embeddings_input_dim=3_2 , class_embeddings_concat=_UpperCamelCase , )
UpperCAmelCase_ : Union[str, Any] = DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='scaled_linear' , clip_sample=_UpperCamelCase , set_alpha_to_one=_UpperCamelCase , )
torch.manual_seed(0 )
UpperCAmelCase_ : Optional[Any] = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=1 , out_channels=1 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
torch.manual_seed(0 )
UpperCAmelCase_ : int = ClapTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , projection_dim=3_2 , )
UpperCAmelCase_ : List[str] = ClapTextModelWithProjection(_UpperCamelCase )
UpperCAmelCase_ : Any = RobertaTokenizer.from_pretrained('hf-internal-testing/tiny-random-roberta' , model_max_length=7_7 )
UpperCAmelCase_ : Any = SpeechTaHifiGanConfig(
model_in_dim=8 , sampling_rate=1_6_0_0_0 , upsample_initial_channel=1_6 , upsample_rates=[2, 2] , upsample_kernel_sizes=[4, 4] , resblock_kernel_sizes=[3, 7] , resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]] , normalize_before=_UpperCamelCase , )
UpperCAmelCase_ : Union[str, Any] = SpeechTaHifiGan(_UpperCamelCase )
UpperCAmelCase_ : Any = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'vocoder': vocoder,
}
return components
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase=0 ) -> Optional[int]:
if str(_UpperCamelCase ).startswith('mps' ):
UpperCAmelCase_ : Tuple = torch.manual_seed(_UpperCamelCase )
else:
UpperCAmelCase_ : List[str] = torch.Generator(device=_UpperCamelCase ).manual_seed(_UpperCamelCase )
UpperCAmelCase_ : Union[str, Any] = {
'prompt': 'A hammer hitting a wooden surface',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
}
return inputs
def __UpperCAmelCase ( self ) -> Dict:
UpperCAmelCase_ : List[str] = 'cpu' # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ : Any = self.get_dummy_components()
UpperCAmelCase_ : Any = AudioLDMPipeline(**_UpperCamelCase )
UpperCAmelCase_ : List[str] = audioldm_pipe.to(_UpperCamelCase )
audioldm_pipe.set_progress_bar_config(disable=_UpperCamelCase )
UpperCAmelCase_ : List[str] = self.get_dummy_inputs(_UpperCamelCase )
UpperCAmelCase_ : List[Any] = audioldm_pipe(**_UpperCamelCase )
UpperCAmelCase_ : Union[str, Any] = output.audios[0]
assert audio.ndim == 1
assert len(_UpperCamelCase ) == 2_5_6
UpperCAmelCase_ : int = audio[:1_0]
UpperCAmelCase_ : List[str] = np.array(
[-0.00_50, 0.00_50, -0.00_60, 0.00_33, -0.00_26, 0.00_33, -0.00_27, 0.00_33, -0.00_28, 0.00_33] )
assert np.abs(audio_slice - expected_slice ).max() < 1E-2
def __UpperCAmelCase ( self ) -> int:
UpperCAmelCase_ : Optional[int] = self.get_dummy_components()
UpperCAmelCase_ : Union[str, Any] = AudioLDMPipeline(**_UpperCamelCase )
UpperCAmelCase_ : int = audioldm_pipe.to(_UpperCamelCase )
UpperCAmelCase_ : Optional[Any] = audioldm_pipe.to(_UpperCamelCase )
audioldm_pipe.set_progress_bar_config(disable=_UpperCamelCase )
UpperCAmelCase_ : Optional[int] = self.get_dummy_inputs(_UpperCamelCase )
UpperCAmelCase_ : Tuple = 3 * [inputs['prompt']]
# forward
UpperCAmelCase_ : Optional[Any] = audioldm_pipe(**_UpperCamelCase )
UpperCAmelCase_ : Tuple = output.audios[0]
UpperCAmelCase_ : Optional[Any] = self.get_dummy_inputs(_UpperCamelCase )
UpperCAmelCase_ : str = 3 * [inputs.pop('prompt' )]
UpperCAmelCase_ : Union[str, Any] = audioldm_pipe.tokenizer(
_UpperCamelCase , padding='max_length' , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=_UpperCamelCase , return_tensors='pt' , )
UpperCAmelCase_ : str = text_inputs['input_ids'].to(_UpperCamelCase )
UpperCAmelCase_ : Any = audioldm_pipe.text_encoder(
_UpperCamelCase , )
UpperCAmelCase_ : List[Any] = prompt_embeds.text_embeds
# additional L_2 normalization over each hidden-state
UpperCAmelCase_ : Any = F.normalize(_UpperCamelCase , dim=-1 )
UpperCAmelCase_ : int = prompt_embeds
# forward
UpperCAmelCase_ : str = audioldm_pipe(**_UpperCamelCase )
UpperCAmelCase_ : Any = output.audios[0]
assert np.abs(audio_a - audio_a ).max() < 1E-2
def __UpperCAmelCase ( self ) -> List[str]:
UpperCAmelCase_ : Union[str, Any] = self.get_dummy_components()
UpperCAmelCase_ : Optional[Any] = AudioLDMPipeline(**_UpperCamelCase )
UpperCAmelCase_ : int = audioldm_pipe.to(_UpperCamelCase )
UpperCAmelCase_ : Any = audioldm_pipe.to(_UpperCamelCase )
audioldm_pipe.set_progress_bar_config(disable=_UpperCamelCase )
UpperCAmelCase_ : Dict = self.get_dummy_inputs(_UpperCamelCase )
UpperCAmelCase_ : List[Any] = 3 * ['this is a negative prompt']
UpperCAmelCase_ : Optional[int] = negative_prompt
UpperCAmelCase_ : Any = 3 * [inputs['prompt']]
# forward
UpperCAmelCase_ : Optional[Any] = audioldm_pipe(**_UpperCamelCase )
UpperCAmelCase_ : Optional[Any] = output.audios[0]
UpperCAmelCase_ : Any = self.get_dummy_inputs(_UpperCamelCase )
UpperCAmelCase_ : Union[str, Any] = 3 * [inputs.pop('prompt' )]
UpperCAmelCase_ : List[Any] = []
for p in [prompt, negative_prompt]:
UpperCAmelCase_ : Dict = audioldm_pipe.tokenizer(
_UpperCamelCase , padding='max_length' , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=_UpperCamelCase , return_tensors='pt' , )
UpperCAmelCase_ : Optional[Any] = text_inputs['input_ids'].to(_UpperCamelCase )
UpperCAmelCase_ : Optional[int] = audioldm_pipe.text_encoder(
_UpperCamelCase , )
UpperCAmelCase_ : List[str] = text_embeds.text_embeds
# additional L_2 normalization over each hidden-state
UpperCAmelCase_ : Optional[int] = F.normalize(_UpperCamelCase , dim=-1 )
embeds.append(_UpperCamelCase )
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = embeds
# forward
UpperCAmelCase_ : Union[str, Any] = audioldm_pipe(**_UpperCamelCase )
UpperCAmelCase_ : List[str] = output.audios[0]
assert np.abs(audio_a - audio_a ).max() < 1E-2
def __UpperCAmelCase ( self ) -> Optional[int]:
UpperCAmelCase_ : Union[str, Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ : Optional[int] = self.get_dummy_components()
UpperCAmelCase_ : Union[str, Any] = PNDMScheduler(skip_prk_steps=_UpperCamelCase )
UpperCAmelCase_ : Dict = AudioLDMPipeline(**_UpperCamelCase )
UpperCAmelCase_ : str = audioldm_pipe.to(_UpperCamelCase )
audioldm_pipe.set_progress_bar_config(disable=_UpperCamelCase )
UpperCAmelCase_ : Optional[Any] = self.get_dummy_inputs(_UpperCamelCase )
UpperCAmelCase_ : Any = 'egg cracking'
UpperCAmelCase_ : List[Any] = audioldm_pipe(**_UpperCamelCase , negative_prompt=_UpperCamelCase )
UpperCAmelCase_ : List[Any] = output.audios[0]
assert audio.ndim == 1
assert len(_UpperCamelCase ) == 2_5_6
UpperCAmelCase_ : List[Any] = audio[:1_0]
UpperCAmelCase_ : int = np.array(
[-0.00_51, 0.00_50, -0.00_60, 0.00_34, -0.00_26, 0.00_33, -0.00_27, 0.00_33, -0.00_28, 0.00_32] )
assert np.abs(audio_slice - expected_slice ).max() < 1E-2
def __UpperCAmelCase ( self ) -> List[str]:
UpperCAmelCase_ : Dict = 'cpu' # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ : Optional[int] = self.get_dummy_components()
UpperCAmelCase_ : Any = PNDMScheduler(skip_prk_steps=_UpperCamelCase )
UpperCAmelCase_ : List[str] = AudioLDMPipeline(**_UpperCamelCase )
UpperCAmelCase_ : Tuple = audioldm_pipe.to(_UpperCamelCase )
audioldm_pipe.set_progress_bar_config(disable=_UpperCamelCase )
UpperCAmelCase_ : Optional[int] = 'A hammer hitting a wooden surface'
# test num_waveforms_per_prompt=1 (default)
UpperCAmelCase_ : int = audioldm_pipe(_UpperCamelCase , num_inference_steps=2 ).audios
assert audios.shape == (1, 2_5_6)
# test num_waveforms_per_prompt=1 (default) for batch of prompts
UpperCAmelCase_ : List[str] = 2
UpperCAmelCase_ : List[str] = audioldm_pipe([prompt] * batch_size , num_inference_steps=2 ).audios
assert audios.shape == (batch_size, 2_5_6)
# test num_waveforms_per_prompt for single prompt
UpperCAmelCase_ : List[Any] = 2
UpperCAmelCase_ : Optional[Any] = audioldm_pipe(_UpperCamelCase , num_inference_steps=2 , num_waveforms_per_prompt=_UpperCamelCase ).audios
assert audios.shape == (num_waveforms_per_prompt, 2_5_6)
# test num_waveforms_per_prompt for batch of prompts
UpperCAmelCase_ : str = 2
UpperCAmelCase_ : int = audioldm_pipe(
[prompt] * batch_size , num_inference_steps=2 , num_waveforms_per_prompt=_UpperCamelCase ).audios
assert audios.shape == (batch_size * num_waveforms_per_prompt, 2_5_6)
def __UpperCAmelCase ( self ) -> Any:
UpperCAmelCase_ : Union[str, Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ : Tuple = self.get_dummy_components()
UpperCAmelCase_ : Optional[int] = AudioLDMPipeline(**_UpperCamelCase )
UpperCAmelCase_ : Dict = audioldm_pipe.to(_UpperCamelCase )
audioldm_pipe.set_progress_bar_config(disable=_UpperCamelCase )
UpperCAmelCase_ : Optional[int] = audioldm_pipe.vocoder.config.sampling_rate
UpperCAmelCase_ : str = self.get_dummy_inputs(_UpperCamelCase )
UpperCAmelCase_ : str = audioldm_pipe(audio_length_in_s=0.0_16 , **_UpperCamelCase )
UpperCAmelCase_ : Optional[int] = output.audios[0]
assert audio.ndim == 1
assert len(_UpperCamelCase ) / vocoder_sampling_rate == 0.0_16
UpperCAmelCase_ : Dict = audioldm_pipe(audio_length_in_s=0.0_32 , **_UpperCamelCase )
UpperCAmelCase_ : int = output.audios[0]
assert audio.ndim == 1
assert len(_UpperCamelCase ) / vocoder_sampling_rate == 0.0_32
def __UpperCAmelCase ( self ) -> Any:
UpperCAmelCase_ : Dict = self.get_dummy_components()
UpperCAmelCase_ : Optional[int] = AudioLDMPipeline(**_UpperCamelCase )
UpperCAmelCase_ : int = audioldm_pipe.to(_UpperCamelCase )
audioldm_pipe.set_progress_bar_config(disable=_UpperCamelCase )
UpperCAmelCase_ : Union[str, Any] = ['hey']
UpperCAmelCase_ : Optional[Any] = audioldm_pipe(_UpperCamelCase , num_inference_steps=1 )
UpperCAmelCase_ : Any = output.audios.shape
assert audio_shape == (1, 2_5_6)
UpperCAmelCase_ : Any = audioldm_pipe.vocoder.config
config.model_in_dim *= 2
UpperCAmelCase_ : List[str] = SpeechTaHifiGan(_UpperCamelCase ).to(_UpperCamelCase )
UpperCAmelCase_ : List[str] = audioldm_pipe(_UpperCamelCase , num_inference_steps=1 )
UpperCAmelCase_ : str = output.audios.shape
# waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
assert audio_shape == (1, 2_5_6)
def __UpperCAmelCase ( self ) -> Optional[int]:
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=_UpperCamelCase )
def __UpperCAmelCase ( self ) -> Tuple:
self._test_inference_batch_single_identical(test_mean_pixel_difference=_UpperCamelCase )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def __UpperCAmelCase ( self ) -> List[Any]:
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=_UpperCamelCase )
@slow
class lowerCamelCase (unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self ) -> Optional[int]:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase="cpu" , _UpperCamelCase=torch.floataa , _UpperCamelCase=0 ) -> str:
UpperCAmelCase_ : int = torch.Generator(device=_UpperCamelCase ).manual_seed(_UpperCamelCase )
UpperCAmelCase_ : Dict = np.random.RandomState(_UpperCamelCase ).standard_normal((1, 8, 1_2_8, 1_6) )
UpperCAmelCase_ : List[str] = torch.from_numpy(_UpperCamelCase ).to(device=_UpperCamelCase , dtype=_UpperCamelCase )
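        # fixed latents of shape (1, 8, 128, 16) drawn from a seeded RNG keep this slow test deterministic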
UpperCAmelCase_ : int = {
'prompt': 'A hammer hitting a wooden surface',
'latents': latents,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 2.5,
}
return inputs
def __UpperCAmelCase ( self ) -> str:
UpperCAmelCase_ : List[str] = AudioLDMPipeline.from_pretrained('cvssp/audioldm' )
UpperCAmelCase_ : List[str] = audioldm_pipe.to(_UpperCamelCase )
audioldm_pipe.set_progress_bar_config(disable=_UpperCamelCase )
UpperCAmelCase_ : Optional[int] = self.get_inputs(_UpperCamelCase )
UpperCAmelCase_ : Tuple = 2_5
UpperCAmelCase_ : Any = audioldm_pipe(**_UpperCamelCase ).audios[0]
assert audio.ndim == 1
assert len(_UpperCamelCase ) == 8_1_9_2_0
UpperCAmelCase_ : Union[str, Any] = audio[7_7_2_3_0:7_7_2_4_0]
UpperCAmelCase_ : List[Any] = np.array(
[-0.48_84, -0.46_07, 0.00_23, 0.50_07, 0.58_96, 0.51_51, 0.38_13, -0.02_08, -0.36_87, -0.43_15] )
UpperCAmelCase_ : Dict = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 1E-2
def __UpperCAmelCase ( self ) -> Any:
UpperCAmelCase_ : List[str] = AudioLDMPipeline.from_pretrained('cvssp/audioldm' )
UpperCAmelCase_ : str = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config )
UpperCAmelCase_ : Union[str, Any] = audioldm_pipe.to(_UpperCamelCase )
audioldm_pipe.set_progress_bar_config(disable=_UpperCamelCase )
UpperCAmelCase_ : int = self.get_inputs(_UpperCamelCase )
UpperCAmelCase_ : Optional[Any] = audioldm_pipe(**_UpperCamelCase ).audios[0]
assert audio.ndim == 1
assert len(_UpperCamelCase ) == 8_1_9_2_0
UpperCAmelCase_ : Dict = audio[2_7_7_8_0:2_7_7_9_0]
UpperCAmelCase_ : Union[str, Any] = np.array([-0.21_31, -0.08_73, -0.01_24, -0.01_89, 0.05_69, 0.13_73, 0.18_83, 0.28_86, 0.32_97, 0.22_12] )
UpperCAmelCase_ : Union[str, Any] = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 3E-2
| 29 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
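# declare the public API lazily; _LazyModule imports the torch-heavy submodules only on first attribute access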
__UpperCAmelCase = {
'configuration_time_series_transformer': [
'TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'TimeSeriesTransformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TimeSeriesTransformerForPrediction',
'TimeSeriesTransformerModel',
'TimeSeriesTransformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 29 | 1 |
import string
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
__UpperCAmelCase = {
'E': 1_2.7_0,
'T': 9.0_6,
'A': 8.1_7,
'O': 7.5_1,
'I': 6.9_7,
'N': 6.7_5,
'S': 6.3_3,
'H': 6.0_9,
'R': 5.9_9,
'D': 4.2_5,
'L': 4.0_3,
'C': 2.7_8,
'U': 2.7_6,
'M': 2.4_1,
'W': 2.3_6,
'F': 2.2_3,
'G': 2.0_2,
'Y': 1.9_7,
'P': 1.9_3,
'B': 1.2_9,
'V': 0.9_8,
'K': 0.7_7,
'J': 0.1_5,
'X': 0.1_5,
'Q': 0.1_0,
'Z': 0.0_7,
}
__UpperCAmelCase = 'ETAOINSHRDLCUMWFGYPBVKJXQZ'
__UpperCAmelCase = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
def get_letter_count( message : str ):
    '''simple docstring'''
    letter_count = {letter: 0 for letter in string.ascii_uppercase}
    for letter in message.upper():
        if letter in LETTERS:
            letter_count[letter] += 1
    return letter_count
def get_item_at_index_zero( x : tuple ):
    '''simple docstring'''
    return x[0]
def get_frequency_order( message : str ):
    '''simple docstring'''
    letter_to_freq = get_letter_count(message )
    freq_to_letter: dict[int, list[str]] = {
        freq: [] for letter, freq in letter_to_freq.items()
    }
    for letter in LETTERS:
        freq_to_letter[letter_to_freq[letter]].append(letter )
    freq_to_letter_str: dict[int, str] = {}
    for freq in freq_to_letter:
        freq_to_letter[freq].sort(key=ETAOIN.find , reverse=True )
        freq_to_letter_str[freq] = ''.join(freq_to_letter[freq] )
    freq_pairs = list(freq_to_letter_str.items() )
    freq_pairs.sort(key=get_item_at_index_zero , reverse=True )
    freq_order: list[str] = [freq_pair[1] for freq_pair in freq_pairs]
    return "".join(freq_order )
def english_freq_match_score( message : str ):
    '''simple docstring'''
    freq_order = get_frequency_order(message )
    match_score = 0
for common_letter in ETAOIN[:6]:
if common_letter in freq_order[:6]:
match_score += 1
for uncommon_letter in ETAOIN[-6:]:
if uncommon_letter in freq_order[-6:]:
match_score += 1
return match_score
if __name__ == "__main__":
import doctest
doctest.testmod()
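# Usage sketch for the helpers above; exact outputs depend on the message, so
# the values in the comments are only indicative:
def demo_frequency_analysis():
    sample = 'Ethics are built right into the ideals and objectives of the United Nations'
    print(get_frequency_order(sample ))       # a permutation of the 26 uppercase letters
    print(english_freq_match_score(sample ))  # 0-12; higher reads as more English-like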
| 29 |
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
'tensor(bool)': np.bool_,
    'tensor(int8)': np.int8,
    'tensor(uint8)': np.uint8,
    'tensor(int16)': np.int16,
    'tensor(uint16)': np.uint16,
    'tensor(int32)': np.int32,
    'tensor(uint32)': np.uint32,
    'tensor(int64)': np.int64,
    'tensor(uint64)': np.uint64,
    'tensor(float16)': np.float16,
    'tensor(float)': np.float32,
    'tensor(double)': np.float64,
}
class OnnxRuntimeModel :
    '''simple docstring'''
    def __init__( self , model=None , **kwargs ) -> Dict:
        logger.info('`diffusers.OnnxRuntimeModel` is experimental and might change in the future.' )
        self.model = model
        self.model_save_dir = kwargs.get('model_save_dir' , None )
        self.latest_model_name = kwargs.get('latest_model_name' , ONNX_WEIGHTS_NAME )
    def __call__( self , **kwargs ) -> str:
        inputs = {k: np.array(v ) for k, v in kwargs.items()}
        return self.model.run(None , inputs )
@staticmethod
    def load_model( path , provider=None , sess_options=None ) -> List[Any]:
        if provider is None:
            logger.info('No onnxruntime provider specified, using CPUExecutionProvider' )
            provider = 'CPUExecutionProvider'
        return ort.InferenceSession(path , providers=[provider] , sess_options=sess_options )
    def _save_pretrained( self , save_directory , file_name = None , **kwargs ) -> Dict:
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        src_path = self.model_save_dir.joinpath(self.latest_model_name )
        dst_path = Path(save_directory ).joinpath(model_file_name )
        try:
            shutil.copyfile(src_path , dst_path )
        except shutil.SameFileError:
            pass
        # copy external weights (for models >2GB)
        src_path = self.model_save_dir.joinpath(ONNX_EXTERNAL_WEIGHTS_NAME )
        if src_path.exists():
            dst_path = Path(save_directory ).joinpath(ONNX_EXTERNAL_WEIGHTS_NAME )
            try:
                shutil.copyfile(src_path , dst_path )
            except shutil.SameFileError:
                pass
    def save_pretrained( self , save_directory , **kwargs ) -> List[str]:
        if os.path.isfile(save_directory ):
            logger.error(f"Provided path ({save_directory}) should be a directory, not a file" )
            return
        os.makedirs(save_directory , exist_ok=True )
        # saving model weights/files
        self._save_pretrained(save_directory , **kwargs )
@classmethod
    def _from_pretrained( cls , model_id , use_auth_token = None , revision = None , force_download = False , cache_dir = None , file_name = None , provider = None , sess_options = None , **kwargs ) -> List[str]:
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        # load model from local directory
        if os.path.isdir(model_id ):
            model = OnnxRuntimeModel.load_model(
                os.path.join(model_id , model_file_name ) , provider=provider , sess_options=sess_options )
            kwargs['model_save_dir'] = Path(model_id )
        # load model from hub
        else:
            # download model
            model_cache_path = hf_hub_download(
                repo_id=model_id , filename=model_file_name , use_auth_token=use_auth_token , revision=revision , cache_dir=cache_dir , force_download=force_download , )
            kwargs['model_save_dir'] = Path(model_cache_path ).parent
            kwargs['latest_model_name'] = Path(model_cache_path ).name
            model = OnnxRuntimeModel.load_model(model_cache_path , provider=provider , sess_options=sess_options )
        return cls(model=model , **kwargs )
@classmethod
    def from_pretrained( cls , model_id , force_download = True , cache_dir = None , use_auth_token = None , **model_kwargs ) -> Optional[int]:
        revision = None
        if len(str(model_id ).split('@' ) ) == 2:
            model_id , revision = model_id.split('@' )
        return cls._from_pretrained(
            model_id=model_id , revision=revision , cache_dir=cache_dir , force_download=force_download , use_auth_token=use_auth_token , **model_kwargs , )
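# Minimal usage sketch for the wrapper above. The local directory, file name
# and input name are illustrative placeholders; any exported .onnx graph with
# matching input names works the same way:
#
#   model = OnnxRuntimeModel.from_pretrained('./onnx_model' , provider='CPUExecutionProvider' )
#   outputs = model(sample=np.random.randn(1 , 4 , 6_4 , 6_4 ).astype(np.float32 ) )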
| 29 | 1 |
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
'snap-research/efficientformer-l1-300': (
'https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json'
),
}
class EfficientFormerConfig(PretrainedConfig ):
    '''simple docstring'''
    model_type = '''efficientformer'''
    def __init__( self , depths = [3, 2, 6, 4] , hidden_sizes = [4_8, 9_6, 2_2_4, 4_4_8] , downsamples = [True, True, True, True] , dim = 4_4_8 , key_dim = 3_2 , attention_ratio = 4 , resolution = 7 , num_hidden_layers = 5 , num_attention_heads = 8 , mlp_expansion_ratio = 4 , hidden_dropout_prob = 0.0 , patch_size = 1_6 , num_channels = 3 , pool_size = 3 , downsample_patch_size = 3 , downsample_stride = 2 , downsample_pad = 1 , drop_path_rate = 0.0 , num_meta3d_blocks = 1 , distillation = True , use_layer_scale = True , layer_scale_init_value = 1E-5 , hidden_act = "gelu" , initializer_range = 0.02 , layer_norm_eps = 1E-12 , image_size = 2_2_4 , batch_norm_eps = 1E-05 , **kwargs ) -> None:
        super().__init__(**kwargs )
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.hidden_sizes = hidden_sizes
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.depths = depths
        self.mlp_expansion_ratio = mlp_expansion_ratio
        self.downsamples = downsamples
        self.dim = dim
        self.key_dim = key_dim
        self.attention_ratio = attention_ratio
        self.resolution = resolution
        self.pool_size = pool_size
        self.downsample_patch_size = downsample_patch_size
        self.downsample_stride = downsample_stride
        self.downsample_pad = downsample_pad
        self.drop_path_rate = drop_path_rate
        self.num_meta3d_blocks = num_meta3d_blocks
        self.distillation = distillation
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.image_size = image_size
        self.batch_norm_eps = batch_norm_eps
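# Instantiation sketch: the defaults above mirror the snap-research L1
# checkpoint, and any field can be overridden per instance:
_demo_config = EfficientFormerConfig(image_size=1_9_2 , drop_path_rate=0.1 )
assert _demo_config.image_size == 1_9_2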
| 29 |
import contextlib
import csv
import json
import os
import sqlite3
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope='session' )
def lowercase__ ( ):
'''simple docstring'''
    n = 10
    features = datasets.Features(
{
'tokens': datasets.Sequence(datasets.Value('string' ) ),
'labels': datasets.Sequence(datasets.ClassLabel(names=['negative', 'positive'] ) ),
'answers': datasets.Sequence(
{
'text': datasets.Value('string' ),
'answer_start': datasets.Value('int32' ),
} ),
'id': datasets.Value('int64' ),
} )
    dataset = datasets.Dataset.from_dict(
{
'tokens': [['foo'] * 5] * n,
'labels': [[1] * 5] * n,
'answers': [{'answer_start': [97], 'text': ['1976']}] * 10,
            'id': list(range(n ) ),
        } , features=features , )
return dataset
@pytest.fixture(scope='session' )
def lowercase__ ( dataset , tmp_path_factory ):
    '''simple docstring'''
    filename = str(tmp_path_factory.mktemp('data' ) / 'file.arrow' )
    dataset.map(cache_file_name=filename )
return filename
# FILE_CONTENT + files
FILE_CONTENT = '\\n    Text data.\n    Second line of data.'
@pytest.fixture(scope='session' )
def lowercase__ ( tmp_path_factory ):
    '''simple docstring'''
    filename = tmp_path_factory.mktemp('data' ) / 'file.txt'
    data = FILE_CONTENT
    with open(filename , 'w' ) as f:
        f.write(data )
return filename
@pytest.fixture(scope='session' )
def lowercase__ ( tmp_path_factory ):
    '''simple docstring'''
    import bz2
    path = tmp_path_factory.mktemp('data' ) / 'file.txt.bz2'
    data = bytes(FILE_CONTENT , 'utf-8' )
    with bz2.open(path , 'wb' ) as f:
        f.write(data )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( tmp_path_factory ):
    '''simple docstring'''
    import gzip
    path = str(tmp_path_factory.mktemp('data' ) / 'file.txt.gz' )
    data = bytes(FILE_CONTENT , 'utf-8' )
    with gzip.open(path , 'wb' ) as f:
        f.write(data )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( tmp_path_factory ):
    '''simple docstring'''
    if datasets.config.LZ4_AVAILABLE:
        import lz4.frame
        path = tmp_path_factory.mktemp('data' ) / 'file.txt.lz4'
        data = bytes(FILE_CONTENT , 'utf-8' )
        with lz4.frame.open(path , 'wb' ) as f:
            f.write(data )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( tmp_path_factory , text_file ):
    '''simple docstring'''
    if datasets.config.PY7ZR_AVAILABLE:
        import py7zr
        path = tmp_path_factory.mktemp('data' ) / 'file.txt.7z'
        with py7zr.SevenZipFile(path , 'w' ) as archive:
            archive.write(text_file , arcname=os.path.basename(text_file ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( tmp_path_factory , text_file ):
    '''simple docstring'''
    import tarfile
    path = tmp_path_factory.mktemp('data' ) / 'file.txt.tar'
    with tarfile.TarFile(path , 'w' ) as f:
        f.add(text_file , arcname=os.path.basename(text_file ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( tmp_path_factory ):
    '''simple docstring'''
    import lzma
    path = tmp_path_factory.mktemp('data' ) / 'file.txt.xz'
    data = bytes(FILE_CONTENT , 'utf-8' )
    with lzma.open(path , 'wb' ) as f:
        f.write(data )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( tmp_path_factory , text_file ):
    '''simple docstring'''
    import zipfile
    path = tmp_path_factory.mktemp('data' ) / 'file.txt.zip'
    with zipfile.ZipFile(path , 'w' ) as f:
        f.write(text_file , arcname=os.path.basename(text_file ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( tmp_path_factory ):
    '''simple docstring'''
    if datasets.config.ZSTANDARD_AVAILABLE:
        import zstandard as zstd
        path = tmp_path_factory.mktemp('data' ) / 'file.txt.zst'
        data = bytes(FILE_CONTENT , 'utf-8' )
        with zstd.open(path , 'wb' ) as f:
            f.write(data )
return path
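# The fixtures above wrap the same FILE_CONTENT in different containers. A
# small helper that reads the stdlib-backed ones back for assertions (lz4,
# py7zr and zstandard would need their own branches and are omitted here):
import bz2 as _bz2
import gzip as _gzip
import lzma as _lzma

_OPENERS = {'.bz2': _bz2.open, '.gz': _gzip.open, '.xz': _lzma.open}

def read_compressed(path ):
    suffix = os.path.splitext(str(path ) )[1]
    with _OPENERS[suffix](path , 'rb' ) as f:
        return f.read()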
@pytest.fixture(scope='session' )
def lowercase__ ( tmp_path_factory ):
    '''simple docstring'''
    filename = tmp_path_factory.mktemp('data' ) / 'file.xml'
    data = textwrap.dedent(
'\\n <?xml version="1.0" encoding="UTF-8" ?>\n <tmx version="1.4">\n <header segtype="sentence" srclang="ca" />\n <body>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 1</seg></tuv>\n <tuv xml:lang="en"><seg>Content 1</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 2</seg></tuv>\n <tuv xml:lang="en"><seg>Content 2</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 3</seg></tuv>\n <tuv xml:lang="en"><seg>Content 3</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 4</seg></tuv>\n <tuv xml:lang="en"><seg>Content 4</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 5</seg></tuv>\n <tuv xml:lang="en"><seg>Content 5</seg></tuv>\n </tu>\n </body>\n </tmx>' )
    with open(filename , 'w' ) as f:
        f.write(data )
return filename
DATA = [
{'col_1': '0', 'col_2': 0, 'col_3': 0.0},
{'col_1': '1', 'col_2': 1, 'col_3': 1.0},
{'col_1': '2', 'col_2': 2, 'col_3': 2.0},
{'col_1': '3', 'col_2': 3, 'col_3': 3.0},
]
DATA2 = [
{'col_1': '4', 'col_2': 4, 'col_3': 4.0},
{'col_1': '5', 'col_2': 5, 'col_3': 5.0},
]
DATA_DICT_OF_LISTS = {
'col_1': ['0', '1', '2', '3'],
'col_2': [0, 1, 2, 3],
'col_3': [0.0, 1.0, 2.0, 3.0],
}
DATA_312 = [
{'col_3': 0.0, 'col_1': '0', 'col_2': 0},
{'col_3': 1.0, 'col_1': '1', 'col_2': 1},
]
DATA_STR = [
{'col_1': 's0', 'col_2': 0, 'col_3': 0.0},
{'col_1': 's1', 'col_2': 1, 'col_3': 1.0},
{'col_1': 's2', 'col_2': 2, 'col_3': 2.0},
{'col_1': 's3', 'col_2': 3, 'col_3': 3.0},
]
@pytest.fixture(scope='session' )
def lowercase__ ( ):
'''simple docstring'''
return DATA_DICT_OF_LISTS
@pytest.fixture(scope='session' )
def lowercase__ ( dataset_dict , tmp_path_factory ):
    '''simple docstring'''
    dataset = datasets.Dataset.from_dict(dataset_dict )
    path = str(tmp_path_factory.mktemp('data' ) / 'dataset.arrow' )
    dataset.map(cache_file_name=path )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( tmp_path_factory ):
    '''simple docstring'''
    path = str(tmp_path_factory.mktemp('data' ) / 'dataset.sqlite' )
    with contextlib.closing(sqlite3.connect(path ) ) as con:
        cur = con.cursor()
        cur.execute('CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)' )
        for item in DATA:
            cur.execute('INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)' , tuple(item.values() ) )
        con.commit()
return path
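# Sanity-read sketch for the sqlite fixture above; column order matches the
# CREATE TABLE statement:
def read_sqlite_rows(path ):
    with contextlib.closing(sqlite3.connect(path ) ) as con:
        return con.execute('SELECT col_1, col_2, col_3 FROM dataset' ).fetchall()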
@pytest.fixture(scope='session' )
def lowercase__ ( tmp_path_factory ):
    '''simple docstring'''
    path = str(tmp_path_factory.mktemp('data' ) / 'dataset.csv' )
    with open(path , 'w' , newline='' ) as f:
        writer = csv.DictWriter(f , fieldnames=['col_1', 'col_2', 'col_3'] )
        writer.writeheader()
        for item in DATA:
            writer.writerow(item )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( tmp_path_factory ):
    '''simple docstring'''
    path = str(tmp_path_factory.mktemp('data' ) / 'dataset2.csv' )
    with open(path , 'w' , newline='' ) as f:
        writer = csv.DictWriter(f , fieldnames=['col_1', 'col_2', 'col_3'] )
        writer.writeheader()
        for item in DATA:
            writer.writerow(item )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( csv_path , tmp_path_factory ):
    '''simple docstring'''
    import bz2
    path = tmp_path_factory.mktemp('data' ) / 'dataset.csv.bz2'
    with open(csv_path , 'rb' ) as f:
        data = f.read()
    # data = bytes(FILE_CONTENT, "utf-8")
    with bz2.open(path , 'wb' ) as f:
        f.write(data )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( csv_path , csva_path , tmp_path_factory ):
    '''simple docstring'''
    path = tmp_path_factory.mktemp('data' ) / 'dataset.csv.zip'
    with zipfile.ZipFile(path , 'w' ) as f:
        f.write(csv_path , arcname=os.path.basename(csv_path ) )
        f.write(csva_path , arcname=os.path.basename(csva_path ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( csv_path , csva_path , tmp_path_factory ):
    '''simple docstring'''
    path = tmp_path_factory.mktemp('data' ) / 'dataset.csv.zip'
    with zipfile.ZipFile(path , 'w' ) as f:
        f.write(csv_path , arcname=os.path.basename(csv_path.replace('.csv' , '.CSV' ) ) )
        f.write(csva_path , arcname=os.path.basename(csva_path.replace('.csv' , '.CSV' ) ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( csv_path , csva_path , tmp_path_factory ):
    '''simple docstring'''
    path = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.csv.zip'
    with zipfile.ZipFile(path , 'w' ) as f:
        f.write(csv_path , arcname=os.path.join('main_dir' , os.path.basename(csv_path ) ) )
        f.write(csva_path , arcname=os.path.join('main_dir' , os.path.basename(csva_path ) ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( tmp_path_factory ):
    '''simple docstring'''
    path = str(tmp_path_factory.mktemp('data' ) / 'dataset.parquet' )
    schema = pa.schema(
        {
            'col_1': pa.string(),
            'col_2': pa.int64(),
            'col_3': pa.float64(),
        } )
    with open(path , 'wb' ) as f:
        writer = pq.ParquetWriter(f , schema=schema )
        pa_table = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(DATA ) )] for k in DATA[0]} , schema=schema )
        writer.write_table(pa_table )
        writer.close()
return path
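# Reading the parquet fixture back is a one-liner with pyarrow, handy when a
# test needs to compare against DATA directly:
def read_parquet_rows(path ):
    return pq.read_table(path ).to_pylist()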
@pytest.fixture(scope='session' )
def lowercase__ ( tmp_path_factory ):
    '''simple docstring'''
    path = str(tmp_path_factory.mktemp('data' ) / 'dataset.json' )
    data = {'data': DATA}
    with open(path , 'w' ) as f:
        json.dump(data , f )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( tmp_path_factory ):
    '''simple docstring'''
    path = str(tmp_path_factory.mktemp('data' ) / 'dataset.json' )
    data = {'data': DATA_DICT_OF_LISTS}
    with open(path , 'w' ) as f:
        json.dump(data , f )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( tmp_path_factory ):
    '''simple docstring'''
    path = str(tmp_path_factory.mktemp('data' ) / 'dataset.jsonl' )
    with open(path , 'w' ) as f:
        for item in DATA:
            f.write(json.dumps(item ) + '\n' )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( tmp_path_factory ):
    '''simple docstring'''
    path = str(tmp_path_factory.mktemp('data' ) / 'dataset2.jsonl' )
    with open(path , 'w' ) as f:
        for item in DATA:
            f.write(json.dumps(item ) + '\n' )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( tmp_path_factory ):
    '''simple docstring'''
    path = str(tmp_path_factory.mktemp('data' ) / 'dataset_312.jsonl' )
    with open(path , 'w' ) as f:
        for item in DATA_312:
            f.write(json.dumps(item ) + '\n' )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( tmp_path_factory ):
    '''simple docstring'''
    path = str(tmp_path_factory.mktemp('data' ) / 'dataset-str.jsonl' )
    with open(path , 'w' ) as f:
        for item in DATA_STR:
            f.write(json.dumps(item ) + '\n' )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( tmp_path_factory , text_path ):
    '''simple docstring'''
    import gzip
    path = str(tmp_path_factory.mktemp('data' ) / 'dataset.txt.gz' )
    with open(text_path , 'rb' ) as orig_file:
        with gzip.open(path , 'wb' ) as zipped_file:
            zipped_file.writelines(orig_file )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( tmp_path_factory , jsonl_path ):
    '''simple docstring'''
    import gzip
    path = str(tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.gz' )
    with open(jsonl_path , 'rb' ) as orig_file:
        with gzip.open(path , 'wb' ) as zipped_file:
            zipped_file.writelines(orig_file )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( jsonl_path , jsonla_path , tmp_path_factory ):
    '''simple docstring'''
    path = tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.zip'
    with zipfile.ZipFile(path , 'w' ) as f:
        f.write(jsonl_path , arcname=os.path.basename(jsonl_path ) )
        f.write(jsonla_path , arcname=os.path.basename(jsonla_path ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( zip_jsonl_path , jsonl_path , jsonla_path , tmp_path_factory ):
    '''simple docstring'''
    path = tmp_path_factory.mktemp('data' ) / 'dataset_nested.jsonl.zip'
    with zipfile.ZipFile(path , 'w' ) as f:
        f.write(zip_jsonl_path , arcname=os.path.join('nested' , os.path.basename(zip_jsonl_path ) ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( jsonl_path , jsonla_path , tmp_path_factory ):
    '''simple docstring'''
    path = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.jsonl.zip'
    with zipfile.ZipFile(path , 'w' ) as f:
        f.write(jsonl_path , arcname=os.path.join('main_dir' , os.path.basename(jsonl_path ) ) )
        f.write(jsonla_path , arcname=os.path.join('main_dir' , os.path.basename(jsonla_path ) ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( jsonl_path , jsonla_path , tmp_path_factory ):
    '''simple docstring'''
    path = tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.tar'
    with tarfile.TarFile(path , 'w' ) as f:
        f.add(jsonl_path , arcname=os.path.basename(jsonl_path ) )
        f.add(jsonla_path , arcname=os.path.basename(jsonla_path ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( tar_jsonl_path , jsonl_path , jsonla_path , tmp_path_factory ):
    '''simple docstring'''
    path = tmp_path_factory.mktemp('data' ) / 'dataset_nested.jsonl.tar'
    with tarfile.TarFile(path , 'w' ) as f:
        f.add(tar_jsonl_path , arcname=os.path.join('nested' , os.path.basename(tar_jsonl_path ) ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( tmp_path_factory ):
    '''simple docstring'''
    data = ['0', '1', '2', '3']
    path = str(tmp_path_factory.mktemp('data' ) / 'dataset.txt' )
    with open(path , 'w' ) as f:
        for item in data:
            f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( tmp_path_factory ):
    '''simple docstring'''
    data = ['0', '1', '2', '3']
    path = str(tmp_path_factory.mktemp('data' ) / 'dataset2.txt' )
    with open(path , 'w' ) as f:
        for item in data:
            f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( tmp_path_factory ):
    '''simple docstring'''
    data = ['0', '1', '2', '3']
    path = tmp_path_factory.mktemp('data' ) / 'dataset.abc'
    with open(path , 'w' ) as f:
        for item in data:
            f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( text_path , texta_path , tmp_path_factory ):
    '''simple docstring'''
    path = tmp_path_factory.mktemp('data' ) / 'dataset.text.zip'
    with zipfile.ZipFile(path , 'w' ) as f:
        f.write(text_path , arcname=os.path.basename(text_path ) )
        f.write(texta_path , arcname=os.path.basename(texta_path ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( text_path , texta_path , tmp_path_factory ):
    '''simple docstring'''
    path = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.text.zip'
    with zipfile.ZipFile(path , 'w' ) as f:
        f.write(text_path , arcname=os.path.join('main_dir' , os.path.basename(text_path ) ) )
        f.write(texta_path , arcname=os.path.join('main_dir' , os.path.basename(texta_path ) ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( text_path , texta_path , tmp_path_factory ):
    '''simple docstring'''
    path = tmp_path_factory.mktemp('data' ) / 'dataset.ext.zip'
    with zipfile.ZipFile(path , 'w' ) as f:
        f.write(text_path , arcname=os.path.basename('unsupported.ext' ) )
        f.write(texta_path , arcname=os.path.basename('unsupported_2.ext' ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( tmp_path_factory ):
    '''simple docstring'''
    text = '\n'.join(['First', 'Second\u2029with Unicode new line', 'Third'] )
    path = str(tmp_path_factory.mktemp('data' ) / 'dataset_with_unicode_new_lines.txt' )
    with open(path , 'w' , encoding='utf-8' ) as f:
        f.write(text )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( ):
'''simple docstring'''
return os.path.join('tests' , 'features' , 'data' , 'test_image_rgb.jpg' )
@pytest.fixture(scope='session' )
def lowercase__ ( ):
'''simple docstring'''
return os.path.join('tests' , 'features' , 'data' , 'test_audio_44100.wav' )
@pytest.fixture(scope='session' )
def lowercase__ ( image_file , tmp_path_factory ):
    '''simple docstring'''
    path = tmp_path_factory.mktemp('data' ) / 'dataset.img.zip'
    with zipfile.ZipFile(path , 'w' ) as f:
        f.write(image_file , arcname=os.path.basename(image_file ) )
        f.write(image_file , arcname=os.path.basename(image_file ).replace('.jpg' , '2.jpg' ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( tmp_path_factory ):
    '''simple docstring'''
    data_dir = tmp_path_factory.mktemp('data_dir' )
(data_dir / "subdir").mkdir()
with open(data_dir / 'subdir' / 'train.txt' , 'w' ) as f:
f.write('foo\n' * 10 )
with open(data_dir / 'subdir' / 'test.txt' , 'w' ) as f:
f.write('bar\n' * 10 )
# hidden file
with open(data_dir / 'subdir' / '.test.txt' , 'w' ) as f:
f.write('bar\n' * 10 )
# hidden directory
(data_dir / ".subdir").mkdir()
with open(data_dir / '.subdir' / 'train.txt' , 'w' ) as f:
f.write('foo\n' * 10 )
with open(data_dir / '.subdir' / 'test.txt' , 'w' ) as f:
f.write('bar\n' * 10 )
return data_dir
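# The data_dir fixture above deliberately mixes visible and hidden entries;
# loaders are expected to skip dot-files and dot-directories. A quick way to
# express the expected visible set in a test:
def visible_txt_files(data_dir ):
    return sorted(p.name for p in (data_dir / 'subdir').iterdir() if not p.name.startswith('.' ) )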
| 29 | 1 |
import json
import logging
import os
import re
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import datasets
import numpy as np
import torch
import torchaudio
from packaging import version
from torch import nn
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForCTC,
    Wav2Vec2Processor,
is_apex_available,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse('1.6'):
__UpperCAmelCase = True
from torch.cuda.amp import autocast
__UpperCAmelCase = logging.getLogger(__name__)
def list_field( default=None , metadata=None ):
    '''simple docstring'''
    return field(default_factory=lambda: default , metadata=metadata )
@dataclass
class ModelArguments :
'''simple docstring'''
    model_name_or_path : str = field(
        metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
    cache_dir : Optional[str] = field(
        default=None , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
    freeze_feature_extractor : Optional[bool] = field(
        default=True , metadata={'''help''': '''Whether to freeze the feature extractor layers of the model.'''} )
    attention_dropout : Optional[float] = field(
        default=0.1 , metadata={'''help''': '''The dropout ratio for the attention probabilities.'''} )
    activation_dropout : Optional[float] = field(
        default=0.1 , metadata={'''help''': '''The dropout ratio for activations inside the fully connected layer.'''} )
    hidden_dropout : Optional[float] = field(
        default=0.1 , metadata={
            '''help''': '''The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.'''
        } , )
    feat_proj_dropout : Optional[float] = field(
        default=0.1 , metadata={'''help''': '''The dropout probability for all 1D convolutional layers in feature extractor.'''} , )
    mask_time_prob : Optional[float] = field(
        default=0.05 , metadata={
            '''help''': (
                '''Probability of each feature vector along the time axis to be chosen as the start of the vector'''
                ''' span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature'''
                ''' vectors will be masked along the time axis. This is only relevant if ``apply_spec_augment is True``.'''
            )
        } , )
    layerdrop : Optional[float] = field(default=0.0 , metadata={'''help''': '''The LayerDrop probability.'''} )
@dataclass
class DataTrainingArguments :
'''simple docstring'''
    dataset_config_name : Optional[str] = field(
        default=None , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} )
    train_split_name : Optional[str] = field(
        default='''train+validation''' , metadata={
            '''help''': '''The name of the training data set split to use (via the datasets library). Defaults to \'train\''''
        } , )
    overwrite_cache : bool = field(
        default=False , metadata={'''help''': '''Overwrite the cached preprocessed datasets or not.'''} )
    preprocessing_num_workers : Optional[int] = field(
        default=None , metadata={'''help''': '''The number of processes to use for the preprocessing.'''} , )
    max_train_samples : Optional[int] = field(
        default=None , metadata={
            '''help''': (
                '''For debugging purposes or quicker training, truncate the number of training examples to this '''
                '''value if set.'''
            )
        } , )
    max_val_samples : Optional[int] = field(
        default=None , metadata={
            '''help''': (
                '''For debugging purposes or quicker training, truncate the number of validation examples to this '''
                '''value if set.'''
            )
        } , )
    chars_to_ignore : List[str] = list_field(
        default=[''',''', '''?''', '''.''', '''!''', '''-''', ''';''', ''':''', '''""''', '''%''', '''\'''', '''"''', '''�'''] , metadata={'''help''': '''A list of characters to remove from the transcripts.'''} , )
@dataclass
class DataCollatorCTCWithPadding :
'''simple docstring'''
    processor : Wav2Vec2Processor
    padding : Union[bool, str] = True
    max_length : Optional[int] = None
    max_length_labels : Optional[int] = None
    pad_to_multiple_of : Optional[int] = None
    pad_to_multiple_of_labels : Optional[int] = None
    def __call__( self , features ) -> Dict[str, torch.Tensor]:
        # split inputs and labels since they have to be of different lengths and need
        # different padding methods
        input_features = [{'input_values': feature['input_values']} for feature in features]
        label_features = [{'input_ids': feature['labels']} for feature in features]
        batch = self.processor.pad(
            input_features , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='pt' , )
        labels_batch = self.processor.pad(
            labels=label_features , padding=self.padding , max_length=self.max_length_labels , pad_to_multiple_of=self.pad_to_multiple_of_labels , return_tensors='pt' , )
        # replace padding with -100 to ignore loss correctly
        labels = labels_batch['input_ids'].masked_fill(labels_batch.attention_mask.ne(1 ) , -1_0_0 )
        batch['labels'] = labels
        return batch
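# Collator usage sketch: `features` entries look like the dicts produced by
# prepare_dataset further down; the values here are illustrative dummies:
#
#   collator = DataCollatorCTCWithPadding(processor=processor , padding=True )
#   batch = collator([
#       {'input_values': [0.1, 0.2, 0.3], 'labels': [5, 2]},
#       {'input_values': [0.4, 0.5], 'labels': [7]},
#   ])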
class CTCTrainer (Trainer ):
'''simple docstring'''
    def training_step( self , model , inputs ) -> torch.Tensor:
        model.train()
        inputs = self._prepare_inputs(inputs )
        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model , inputs )
        else:
            loss = self.compute_loss(model , inputs )
        if self.args.n_gpu > 1:
            if model.module.config.ctc_loss_reduction == "mean":
                loss = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                loss = loss.sum() / (inputs['labels'] >= 0).sum()
            else:
                raise ValueError(f"{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']" )
        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps
        if self.use_amp:
            self.scaler.scale(loss ).backward()
        elif self.use_apex:
            with amp.scale_loss(loss , self.optimizer ) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(loss )
        else:
            loss.backward()
        return loss.detach()
def main():
    '''simple docstring'''
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args , data_args , training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        model_args , data_args , training_args = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"Output directory ({training_args.output_dir}) already exists and is not empty. "
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None:
logger.info(
F"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
F"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ F"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
    logger.info('Training/evaluation parameters %s' , training_args )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets:
    train_dataset = datasets.load_dataset(
        'common_voice' , data_args.dataset_config_name , split=data_args.train_split_name )
    eval_dataset = datasets.load_dataset('common_voice' , data_args.dataset_config_name , split='test' )
    # Create and save tokenizer
    chars_to_ignore_regex = F"[{''.join(data_args.chars_to_ignore )}]"
    def remove_special_characters(batch ):
        batch['text'] = re.sub(chars_to_ignore_regex , '' , batch['sentence'] ).lower() + ' '
        return batch
    train_dataset = train_dataset.map(remove_special_characters , remove_columns=['sentence'] )
    eval_dataset = eval_dataset.map(remove_special_characters , remove_columns=['sentence'] )
    def extract_all_chars(batch ):
        all_text = ' '.join(batch['text'] )
        vocab = list(set(all_text ) )
        return {"vocab": [vocab], "all_text": [all_text]}
    vocab_train = train_dataset.map(
        extract_all_chars , batched=True , batch_size=-1 , keep_in_memory=True , remove_columns=train_dataset.column_names , )
    vocab_test = eval_dataset.map(
        extract_all_chars , batched=True , batch_size=-1 , keep_in_memory=True , remove_columns=eval_dataset.column_names , )
    vocab_list = list(set(vocab_train['vocab'][0] ) | set(vocab_test['vocab'][0] ) )
    vocab_dict = {v: k for k, v in enumerate(vocab_list )}
    vocab_dict['|'] = vocab_dict[' ']
    del vocab_dict[" "]
    vocab_dict['[UNK]'] = len(vocab_dict )
    vocab_dict['[PAD]'] = len(vocab_dict )
    with open('vocab.json' , 'w' ) as vocab_file:
        json.dump(vocab_dict , vocab_file )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    tokenizer = Wav2Vec2CTCTokenizer(
        'vocab.json' , unk_token='[UNK]' , pad_token='[PAD]' , word_delimiter_token='|' , )
    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1 , sampling_rate=16_000 , padding_value=0.0 , do_normalize=True , return_attention_mask=True )
    processor = Wav2Vec2Processor(feature_extractor=feature_extractor , tokenizer=tokenizer )
    model = Wav2Vec2ForCTC.from_pretrained(
model_args.model_name_or_path , cache_dir=model_args.cache_dir , activation_dropout=model_args.activation_dropout , attention_dropout=model_args.attention_dropout , hidden_dropout=model_args.hidden_dropout , feat_proj_dropout=model_args.feat_proj_dropout , mask_time_prob=model_args.mask_time_prob , gradient_checkpointing=training_args.gradient_checkpointing , layerdrop=model_args.layerdrop , ctc_loss_reduction='mean' , pad_token_id=processor.tokenizer.pad_token_id , vocab_size=len(processor.tokenizer ) , )
if data_args.max_train_samples is not None:
        max_train_samples = min(len(train_dataset ) , data_args.max_train_samples )
        train_dataset = train_dataset.select(range(max_train_samples ) )
    if data_args.max_val_samples is not None:
        eval_dataset = eval_dataset.select(range(data_args.max_val_samples ) )
    resampler = torchaudio.transforms.Resample(48_000 , 16_000 )
# Preprocessing the datasets.
# We need to read the aduio files as arrays and tokenize the targets.
    def speech_file_to_array_fn(batch ):
        speech_array , sampling_rate = torchaudio.load(batch['path'] )
        batch['speech'] = resampler(speech_array ).squeeze().numpy()
        batch['sampling_rate'] = 16_000
        batch['target_text'] = batch['text']
        return batch
    train_dataset = train_dataset.map(
        speech_file_to_array_fn , remove_columns=train_dataset.column_names , num_proc=data_args.preprocessing_num_workers , )
    eval_dataset = eval_dataset.map(
        speech_file_to_array_fn , remove_columns=eval_dataset.column_names , num_proc=data_args.preprocessing_num_workers , )
    def prepare_dataset(batch ):
        # check that all files have the correct sampling rate
        assert (
            len(set(batch['sampling_rate'] ) ) == 1
        ), f"Make sure all inputs have the same sampling rate of {processor.feature_extractor.sampling_rate}."
        processed = processor(
            audio=batch['speech'] , text=batch['target_text'] , sampling_rate=batch['sampling_rate'][0] )
        batch.update(processed )
        return batch
    train_dataset = train_dataset.map(
        prepare_dataset , remove_columns=train_dataset.column_names , batch_size=training_args.per_device_train_batch_size , batched=True , num_proc=data_args.preprocessing_num_workers , )
    eval_dataset = eval_dataset.map(
        prepare_dataset , remove_columns=eval_dataset.column_names , batch_size=training_args.per_device_train_batch_size , batched=True , num_proc=data_args.preprocessing_num_workers , )
# Metric
    wer_metric = datasets.load_metric('wer' )
    def compute_metrics(pred ):
        pred_logits = pred.predictions
        pred_ids = np.argmax(pred_logits , axis=-1 )
        pred.label_ids[pred.label_ids == -1_0_0] = processor.tokenizer.pad_token_id
        pred_str = processor.batch_decode(pred_ids )
        # we do not want to group tokens when computing the metrics
        label_str = processor.batch_decode(pred.label_ids , group_tokens=False )
        wer = wer_metric.compute(predictions=pred_str , references=label_str )
        return {"wer": wer}
if model_args.freeze_feature_extractor:
model.freeze_feature_extractor()
# Data collator
    data_collator = DataCollatorCTCWithPadding(processor=processor , padding=True )
# Initialize our Trainer
    trainer = CTCTrainer(
        model=model , data_collator=data_collator , args=training_args , compute_metrics=compute_metrics , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=processor.feature_extractor , )
# Training
if training_args.do_train:
        if last_checkpoint is not None:
            checkpoint = last_checkpoint
        elif os.path.isdir(model_args.model_name_or_path ):
            checkpoint = model_args.model_name_or_path
        else:
            checkpoint = None
# Save the feature_extractor and the tokenizer
if is_main_process(training_args.local_rank ):
processor.save_pretrained(training_args.output_dir )
        train_result = trainer.train(resume_from_checkpoint=checkpoint )
        trainer.save_model()
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset )
        )
        metrics['train_samples'] = min(max_train_samples , len(train_dataset ) )
        trainer.log_metrics('train' , metrics )
        trainer.save_metrics('train' , metrics )
        trainer.save_state()
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info('*** Evaluate ***' )
        metrics = trainer.evaluate()
        max_val_samples = data_args.max_val_samples if data_args.max_val_samples is not None else len(eval_dataset )
        metrics['eval_samples'] = min(max_val_samples , len(eval_dataset ) )
        trainer.log_metrics('eval' , metrics )
        trainer.save_metrics('eval' , metrics )
return results
if __name__ == "__main__":
main()
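# Typical invocation sketch for this script; the script file name and the XLSR
# checkpoint are illustrative, and the flags map onto the dataclasses above:
#
#   python run_common_voice.py \
#       --model_name_or_path facebook/wav2vec2-large-xlsr-53 \
#       --dataset_config_name tr \
#       --output_dir ./wav2vec2-xlsr-tr \
#       --num_train_epochs 5 --fp16 --do_train --do_eval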
| 29 |
from __future__ import annotations
def get_valid_pos( position : tuple[int, int] , n : int ):
    '''simple docstring'''
    y , x = position
    positions = [
        (y + 1, x + 2),
        (y - 1, x + 2),
        (y + 1, x - 2),
        (y - 1, x - 2),
        (y + 2, x + 1),
        (y + 2, x - 1),
        (y - 2, x + 1),
        (y - 2, x - 1),
    ]
    permissible_positions = []
    for inner_position in positions:
        y_test , x_test = inner_position
        if 0 <= y_test < n and 0 <= x_test < n:
            permissible_positions.append(inner_position )
    return permissible_positions
def is_complete( board : list[list[int]] ):
    '''simple docstring'''
    return not any(elem == 0 for row in board for elem in row )
def open_knight_tour_helper( board : list[list[int]] , pos : tuple[int, int] , curr : int ):
    '''simple docstring'''
    if is_complete(board ):
        return True
    for position in get_valid_pos(pos , len(board ) ):
        y , x = position
        if board[y][x] == 0:
            board[y][x] = curr + 1
            if open_knight_tour_helper(board , position , curr + 1 ):
                return True
            board[y][x] = 0
    return False
def open_knight_tour( n : int ):
    '''simple docstring'''
    board = [[0 for i in range(n )] for j in range(n )]
    for i in range(n ):
        for j in range(n ):
            board[i][j] = 1
            if open_knight_tour_helper(board , (i, j) , 1 ):
                return board
            board[i][j] = 0
    msg = F"Open Knight Tour cannot be performed on a board of size {n}"
    raise ValueError(msg )
if __name__ == "__main__":
import doctest
doctest.testmod()
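# Usage sketch: tiny boards such as 3x3 or 4x4 have no open tour and hit the
# ValueError branch above, while n = 5 has a known solution:
def demo_open_knight_tour():
    for row in open_knight_tour(5 ):
        print(row )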
| 29 | 1 |
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
__UpperCAmelCase = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class ZeroShotObjectDetectionPipeline(ChunkPipeline ):
    '''simple docstring'''
def __init__( self , **_UpperCamelCase ) -> Optional[int]:
super().__init__(**_UpperCamelCase )
if self.framework == "tf":
raise ValueError(f"The {self.__class__} is only available in PyTorch." )
requires_backends(self , 'vision' )
        self.check_model_type(MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING )
    def __call__( self , image , candidate_labels = None , **kwargs ) -> Any:
        if "text_queries" in kwargs:
            candidate_labels = kwargs.pop('text_queries' )
        if isinstance(image , (str, Image.Image) ):
            inputs = {'image': image, 'candidate_labels': candidate_labels}
        else:
            inputs = image
        results = super().__call__(inputs , **kwargs )
        return results
    def _sanitize_parameters( self , **kwargs ) -> str:
        postprocess_params = {}
        if "threshold" in kwargs:
            postprocess_params['threshold'] = kwargs['threshold']
        if "top_k" in kwargs:
            postprocess_params['top_k'] = kwargs['top_k']
        return {}, {}, postprocess_params
    def preprocess( self , inputs ) -> Union[str, Any]:
        image = load_image(inputs['image'] )
        candidate_labels = inputs['candidate_labels']
        if isinstance(candidate_labels , str ):
            candidate_labels = candidate_labels.split(',' )
        target_size = torch.tensor([[image.height, image.width]] , dtype=torch.int32 )
        for i, candidate_label in enumerate(candidate_labels ):
            text_inputs = self.tokenizer(candidate_label , return_tensors=self.framework )
            image_features = self.image_processor(image , return_tensors=self.framework )
yield {
"is_last": i == len(_UpperCamelCase ) - 1,
"target_size": target_size,
"candidate_label": candidate_label,
**text_inputs,
**image_features,
}
    def _forward( self , model_inputs ) -> Optional[int]:
        target_size = model_inputs.pop('target_size' )
        candidate_label = model_inputs.pop('candidate_label' )
        is_last = model_inputs.pop('is_last' )
        outputs = self.model(**model_inputs )
        model_outputs = {'target_size': target_size, 'candidate_label': candidate_label, 'is_last': is_last, **outputs}
        return model_outputs
    def postprocess( self , model_outputs , threshold=0.1 , top_k=None ) -> Tuple:
        results = []
        for model_output in model_outputs:
            label = model_output['candidate_label']
            model_output = BaseModelOutput(model_output )
            outputs = self.image_processor.post_process_object_detection(
                outputs=model_output , threshold=threshold , target_sizes=model_output['target_size'] )[0]
            for index in outputs["scores"].nonzero():
                score = outputs['scores'][index].item()
                box = self._get_bounding_box(outputs['boxes'][index][0] )
                result = {'score': score, 'label': label, 'box': box}
                results.append(result )
        results = sorted(results , key=lambda x : x["score"] , reverse=True )
        if top_k:
            results = results[:top_k]
        return results
    def _get_bounding_box( self , box ) -> Dict[str, int]:
        if self.framework != "pt":
            raise ValueError('The ZeroShotObjectDetectionPipeline is only available in PyTorch.' )
        xmin , ymin , xmax , ymax = box.int().tolist()
        bbox = {
'xmin': xmin,
'ymin': ymin,
'xmax': xmax,
'ymax': ymax,
}
return bbox
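# Pipeline usage sketch; the OWL-ViT checkpoint named here is the usual public
# backbone for this task, and the image URL is a standard COCO sample:
#
#   from transformers import pipeline
#   detector = pipeline('zero-shot-object-detection' , model='google/owlvit-base-patch32' )
#   detector(
#       'http://images.cocodataset.org/val2017/000000039769.jpg' ,
#       candidate_labels=['cat', 'remote control'] , )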
| 29 |
def partition( m : int ):
    '''simple docstring'''
    memo: list[list[int]] = [[0 for _ in range(m )] for _ in range(m + 1 )]
    for i in range(m + 1 ):
        memo[i][0] = 1
    for n in range(m + 1 ):
        for k in range(1 , m ):
            memo[n][k] += memo[n][k - 1]
            if n - k > 0:
                memo[n][k] += memo[n - k - 1][k]
    return memo[m][m - 1]
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
try:
__UpperCAmelCase = int(input('Enter a number: ').strip())
print(partition(n))
except ValueError:
print('Please enter a number.')
else:
try:
__UpperCAmelCase = int(sys.argv[1])
print(partition(n))
except ValueError:
print('Please pass a number.')
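# Spot-check sketch against the known partition numbers p(1)..p(7):
def check_partition_small_values():
    assert [partition(m ) for m in range(1 , 8 )] == [1, 2, 3, 5, 7, 11, 15]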
| 29 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyInpaintPipelineFastTests(PipelineTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    pipeline_class = KandinskyInpaintPipeline
    params = ['prompt', 'image_embeds', 'negative_image_embeds', 'image', 'mask_image']
    batch_params = [
'''prompt''',
'''negative_prompt''',
'''image_embeds''',
'''negative_image_embeds''',
'''image''',
'''mask_image''',
]
    required_optional_params = [
'''generator''',
'''height''',
'''width''',
'''latents''',
'''guidance_scale''',
'''negative_prompt''',
'''num_inference_steps''',
'''return_dict''',
'''guidance_scale''',
'''num_images_per_prompt''',
'''output_type''',
'''return_dict''',
]
    test_xformers_attention = False
@property
    def text_embedder_hidden_size( self ) -> Any:
        return 3_2
    @property
    def time_input_dim( self ) -> Tuple:
        return 3_2
    @property
    def block_out_channels_a( self ) -> int:
        return self.time_input_dim
    @property
    def time_embed_dim( self ) -> Optional[Any]:
        return self.time_input_dim * 4
    @property
    def cross_attention_dim( self ) -> Optional[Any]:
        return 1_0_0
@property
    def dummy_tokenizer( self ) -> Union[str, Any]:
        tokenizer = XLMRobertaTokenizerFast.from_pretrained('YiYiXu/tiny-random-mclip-base' )
        return tokenizer
    @property
    def dummy_text_encoder( self ) -> str:
        torch.manual_seed(0 )
        config = MCLIPConfig(
            numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=3_7 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1_0_0_5 , )
        text_encoder = MultilingualCLIP(config )
        text_encoder = text_encoder.eval()
        return text_encoder
    @property
    def dummy_unet( self ) -> Union[str, Any]:
        torch.manual_seed(0 )
        model_kwargs = {
'in_channels': 9,
# Out channels is double in channels because predicts mean and variance
'out_channels': 8,
'addition_embed_type': 'text_image',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'text_image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
        model = UNet2DConditionModel(**model_kwargs )
        return model
    @property
    def dummy_movq_kwargs( self ) -> str:
return {
"block_out_channels": [3_2, 6_4],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
    def dummy_movq( self ) -> Tuple:
        torch.manual_seed(0 )
        model = VQModel(**self.dummy_movq_kwargs )
        return model
    def get_dummy_components( self ) -> List[Any]:
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq
        scheduler = DDIMScheduler(
            num_train_timesteps=1_0_0_0 , beta_schedule='linear' , beta_start=0.0_00_85 , beta_end=0.0_12 , clip_sample=False , set_alpha_to_one=False , steps_offset=1 , prediction_type='epsilon' , thresholding=False , )
        components = {
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
    def get_dummy_inputs( self , device , seed=0 ) -> Optional[Any]:
        image_embeds = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed ) ).to(device )
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(device )
        # create init_image
        image = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(seed ) ).to(device )
        image = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        init_image = Image.fromarray(np.uint8(image ) ).convert('RGB' ).resize((2_5_6, 2_5_6) )
        # create mask
        mask = np.ones((6_4, 6_4) , dtype=np.float32 )
        mask[:3_2, :3_2] = 0
        if str(device ).startswith('mps' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
'prompt': 'horse',
'image': init_image,
'mask_image': mask,
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'generator': generator,
'height': 6_4,
'width': 6_4,
'num_inference_steps': 2,
'guidance_scale': 4.0,
'output_type': 'np',
}
return inputs
    def test_kandinsky_inpaint( self ) -> str:
        device = 'cpu'
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        output = pipe(**self.get_dummy_inputs(device ) )
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device ) , return_dict=False , )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
print(f"image.shape {image.shape}" )
assert image.shape == (1, 6_4, 6_4, 3)
        expected_slice = np.array(
[0.8_32_69_19, 0.73_79_04_67, 0.20_91_85_81, 0.9_30_96_12, 0.5_51_17_91, 0.43_71_33_28, 0.5_51_33_21, 0.49_92_29_34, 0.59_49_77_86] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
def __UpperCAmelCase ( self ) -> Optional[int]:
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class lowerCamelCase (unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self ) -> Union[str, Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCAmelCase ( self ) -> Tuple:
UpperCAmelCase_ : Union[str, Any] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy' )
UpperCAmelCase_ : int = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' )
UpperCAmelCase_ : Dict = np.ones((7_6_8, 7_6_8) , dtype=np.floataa )
UpperCAmelCase_ : str = 0
UpperCAmelCase_ : int = 'a hat'
UpperCAmelCase_ : int = KandinskyPriorPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-1-prior' , torch_dtype=torch.floataa )
pipe_prior.to(_UpperCamelCase )
UpperCAmelCase_ : List[str] = KandinskyInpaintPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-1-inpaint' , torch_dtype=torch.floataa )
UpperCAmelCase_ : Optional[int] = pipeline.to(_UpperCamelCase )
pipeline.set_progress_bar_config(disable=_UpperCamelCase )
UpperCAmelCase_ : Dict = torch.Generator(device='cpu' ).manual_seed(0 )
UpperCAmelCase_ , UpperCAmelCase_ : str = pipe_prior(
_UpperCamelCase , generator=_UpperCamelCase , num_inference_steps=5 , negative_prompt='' , ).to_tuple()
UpperCAmelCase_ : List[Any] = pipeline(
_UpperCamelCase , image=_UpperCamelCase , mask_image=_UpperCamelCase , image_embeds=_UpperCamelCase , negative_image_embeds=_UpperCamelCase , generator=_UpperCamelCase , num_inference_steps=1_0_0 , height=7_6_8 , width=7_6_8 , output_type='np' , )
UpperCAmelCase_ : Tuple = output.images[0]
assert image.shape == (7_6_8, 7_6_8, 3)
assert_mean_pixel_difference(_UpperCamelCase , _UpperCamelCase )
| 29 |
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
__UpperCAmelCase = logging.get_logger(__name__)
@add_end_docstrings(_snake_case )
class lowerCamelCase (_snake_case ):
'''simple docstring'''
def __init__( self , *_UpperCamelCase , **_UpperCamelCase ) -> int:
super().__init__(*_UpperCamelCase , **_UpperCamelCase )
self.check_model_type(_UpperCamelCase )
def __UpperCAmelCase ( self , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase=None , **_UpperCamelCase ) -> List[Any]:
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = {}, {}
if padding is not None:
UpperCAmelCase_ : List[str] = padding
if truncation is not None:
UpperCAmelCase_ : Tuple = truncation
if top_k is not None:
UpperCAmelCase_ : Dict = top_k
return preprocess_params, {}, postprocess_params
def __call__( self , _UpperCamelCase , _UpperCamelCase = None , **_UpperCamelCase ) -> int:
if isinstance(_UpperCamelCase , (Image.Image, str) ) and isinstance(_UpperCamelCase , _UpperCamelCase ):
UpperCAmelCase_ : Optional[Any] = {'image': image, 'question': question}
else:
UpperCAmelCase_ : List[str] = image
UpperCAmelCase_ : Optional[Any] = super().__call__(_UpperCamelCase , **_UpperCamelCase )
return results
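    # Hedged usage sketch (identifiers below are illustrative, not from this
    # file): vqa = pipeline("visual-question-answering");
    # vqa(image="cat.png", question="What animal is this?", top_k=2) is meant
    # to return a list of {"score": float, "answer": str} dicts, assembled in
    # the postprocess step below.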
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase=False , _UpperCamelCase=False ) -> Optional[Any]:
UpperCAmelCase_ : List[Any] = load_image(inputs['image'] )
UpperCAmelCase_ : Dict = self.tokenizer(
inputs['question'] , return_tensors=self.framework , padding=_UpperCamelCase , truncation=_UpperCamelCase )
UpperCAmelCase_ : int = self.image_processor(images=_UpperCamelCase , return_tensors=self.framework )
model_inputs.update(_UpperCamelCase )
return model_inputs
def __UpperCAmelCase ( self , _UpperCamelCase ) -> Optional[int]:
UpperCAmelCase_ : Any = self.model(**_UpperCamelCase )
return model_outputs
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase=5 ) -> str:
if top_k > self.model.config.num_labels:
UpperCAmelCase_ : Union[str, Any] = self.model.config.num_labels
if self.framework == "pt":
UpperCAmelCase_ : List[str] = model_outputs.logits.sigmoid()[0]
UpperCAmelCase_ , UpperCAmelCase_ : str = probs.topk(_UpperCamelCase )
else:
raise ValueError(f"Unsupported framework: {self.framework}" )
UpperCAmelCase_ : Optional[Any] = scores.tolist()
UpperCAmelCase_ : Tuple = ids.tolist()
return [{"score": score, "answer": self.model.config.idalabel[_id]} for score, _id in zip(_UpperCamelCase , _UpperCamelCase )]
| 29 | 1 |
import math
def lowercase__ ( __snake_case : list , __snake_case : int ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = len(__snake_case )
UpperCAmelCase_ : Optional[int] = int(math.floor(math.sqrt(__snake_case ) ) )
UpperCAmelCase_ : Optional[Any] = 0
while arr[min(__snake_case , __snake_case ) - 1] < x:
UpperCAmelCase_ : int = step
step += int(math.floor(math.sqrt(__snake_case ) ) )
if prev >= n:
return -1
while arr[prev] < x:
UpperCAmelCase_ : Union[str, Any] = prev + 1
if prev == min(__snake_case , __snake_case ):
return -1
if arr[prev] == x:
return prev
return -1
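# Worked example of the intended trace (hedged, not part of the original
# snippet): for arr = [0, 1, 3, 5, 8, 13, 21] and x = 8, the block size is
# floor(sqrt(7)) == 2, so the search jumps prev = 0 -> 2 -> 4, stops once
# arr[min(step, n) - 1] >= x, then scans linearly from prev and returns 4.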
if __name__ == "__main__":
__UpperCAmelCase = input('Enter numbers separated by a comma:\n').strip()
__UpperCAmelCase = [int(item) for item in user_input.split(',')]
__UpperCAmelCase = int(input('Enter the number to be searched:\n'))
__UpperCAmelCase = jump_search(arr, x)
if res == -1:
print('Number not found!')
else:
print(F'Number {x} is at index {res}')
| 29 |
import os
# Precomputes a list of the first 100 triangular numbers
__UpperCAmelCase = [int(0.5 * n * (n + 1)) for n in range(1, 101)]
def lowercase__ ( ):
'''simple docstring'''
UpperCAmelCase_ : Any = os.path.dirname(os.path.realpath(__snake_case ) )
UpperCAmelCase_ : Optional[Any] = os.path.join(__snake_case , 'words.txt' )
UpperCAmelCase_ : Union[str, Any] = ''
with open(__snake_case ) as f:
UpperCAmelCase_ : List[Any] = f.readline()
UpperCAmelCase_ : Optional[int] = [word.strip('"' ) for word in words.strip('\r\n' ).split(',' )]
UpperCAmelCase_ : Optional[int] = [
word
for word in [sum(ord(__snake_case ) - 64 for x in word ) for word in words]
if word in TRIANGULAR_NUMBERS
]
return len(__snake_case )
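# Hedged worked example: "SKY" scores 19 + 11 + 25 == 55, which is the 10th
# triangular number (0.5 * 10 * 11), so it would be counted as a triangle word.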
if __name__ == "__main__":
print(solution())
| 29 | 1 |
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def lowercase__ ( ):
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ : List[str] = 9, 14 # noqa: F841
UpperCAmelCase_ : Optional[Any] = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
UpperCAmelCase_ : int = defaultdict(__snake_case )
for nodea, nodea, cost in edges:
adjancency[nodea].append([nodea, cost] )
adjancency[nodea].append([nodea, cost] )
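    # After this loop, adjancency is intended to map each node to its
    # [neighbour, cost] pairs, e.g. adjancency[0] == [[1, 4], [7, 8]] for the
    # edge list above.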
UpperCAmelCase_ : List[Any] = mst(__snake_case )
UpperCAmelCase_ : Any = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
for answer in expected:
UpperCAmelCase_ : str = tuple(answer[:2] )
UpperCAmelCase_ : Union[str, Any] = tuple(edge[::-1] )
assert edge in result or reverse in result
| 29 |
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
__UpperCAmelCase = importlib.util.find_spec('s3fs') is not None
if _has_safs:
from .safilesystem import SaFileSystem # noqa: F401
__UpperCAmelCase = [
compression.BzaFileSystem,
compression.GzipFileSystem,
compression.LzaFileSystem,
compression.XzFileSystem,
compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
warnings.warn(F'A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.')
fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def lowercase__ ( __snake_case : str ):
'''simple docstring'''
if "://" in dataset_path:
UpperCAmelCase_ : int = dataset_path.split('://' )[1]
return dataset_path
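# Hedged behaviour sketch: the helper is meant to strip a protocol prefix,
# e.g. "s3://bucket/train.csv" -> "bucket/train.csv", while plain local paths
# pass through unchanged.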
def lowercase__ ( __snake_case : fsspec.AbstractFileSystem ):
'''simple docstring'''
if fs is not None and fs.protocol != "file":
return True
else:
return False
def lowercase__ ( __snake_case : fsspec.AbstractFileSystem , __snake_case : str , __snake_case : str ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = not is_remote_filesystem(__snake_case )
if is_local:
        # LocalFileSystem.mv does copy + rm, so it is more efficient to simply move a local directory
shutil.move(fs._strip_protocol(__snake_case ) , fs._strip_protocol(__snake_case ) )
else:
fs.mv(__snake_case , __snake_case , recursive=__snake_case )
def lowercase__ ( ):
'''simple docstring'''
if hasattr(fsspec.asyn , 'reset_lock' ):
# for future fsspec>2022.05.0
fsspec.asyn.reset_lock()
else:
UpperCAmelCase_ : Optional[Any] = None
UpperCAmelCase_ : Union[str, Any] = None
UpperCAmelCase_ : int = threading.Lock()
| 29 | 1 |
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class lowerCamelCase (_snake_case ):
'''simple docstring'''
def __UpperCAmelCase ( self , _UpperCamelCase ) -> int:
with open(_UpperCamelCase , encoding='utf-8' ) as input_file:
UpperCAmelCase_ : int = re.compile(r'(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)' )
UpperCAmelCase_ : Dict = input_file.read()
UpperCAmelCase_ : Union[str, Any] = regexp.search(_UpperCamelCase )
return match
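    # Hedged reading of the regex above: it matches open(...) calls whose
    # arguments mention neither an encoding nor a binary/append mode keyword,
    # i.e. text-mode opens that would rely on the platform default encoding.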
def __UpperCAmelCase ( self , _UpperCamelCase ) -> Union[str, Any]:
with open(_UpperCamelCase , encoding='utf-8' ) as input_file:
UpperCAmelCase_ : Tuple = re.compile(r'#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()' , re.DOTALL )
UpperCAmelCase_ : str = input_file.read()
# use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
UpperCAmelCase_ : Optional[int] = regexp.finditer(_UpperCamelCase )
UpperCAmelCase_ : List[str] = [match for match in matches if match is not None and match.group(1 ) is not None]
return matches[0] if matches else None
def __UpperCAmelCase ( self ) -> List[str]:
UpperCAmelCase_ : List[str] = Path('./datasets' )
UpperCAmelCase_ : List[Any] = list(dataset_paths.absolute().glob('**/*.py' ) )
for dataset in dataset_files:
if self._no_encoding_on_file_open(str(_UpperCamelCase ) ):
raise AssertionError(f"open(...) must use utf-8 encoding in {dataset}" )
def __UpperCAmelCase ( self ) -> Optional[Any]:
UpperCAmelCase_ : Optional[int] = Path('./datasets' )
UpperCAmelCase_ : Optional[Any] = list(dataset_paths.absolute().glob('**/*.py' ) )
for dataset in dataset_files:
if self._no_print_statements(str(_UpperCamelCase ) ):
raise AssertionError(f"print statement found in {dataset}. Use datasets.logger/logging instead." )
| 29 |
def lowercase__ ( __snake_case : list ):
'''simple docstring'''
for i in range(len(__snake_case ) - 1 , 0 , -1 ):
UpperCAmelCase_ : Dict = False
for j in range(__snake_case , 0 , -1 ):
if unsorted[j] < unsorted[j - 1]:
UpperCAmelCase_ , UpperCAmelCase_ : Any = unsorted[j - 1], unsorted[j]
UpperCAmelCase_ : int = True
for j in range(__snake_case ):
if unsorted[j] > unsorted[j + 1]:
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = unsorted[j + 1], unsorted[j]
UpperCAmelCase_ : Any = True
if not swapped:
break
return unsorted
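# Hedged illustration of the intended behaviour: each outer iteration first
# bubbles a small element leftwards (the backward sweep), then a large element
# rightwards (the forward sweep), e.g. [4, 1, 3, 2] -> [1, 2, 3, 4]; the
# early break fires as soon as a full pass makes no swap.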
if __name__ == "__main__":
import doctest
doctest.testmod()
__UpperCAmelCase = input('Enter numbers separated by a comma:\n').strip()
__UpperCAmelCase = [int(item) for item in user_input.split(',')]
print(F'{cocktail_shaker_sort(unsorted) = }')
| 29 | 1 |
import json
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from transformers import OneFormerImageProcessor
from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle
from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput
if is_vision_available():
from PIL import Image
def lowercase__ ( __snake_case : Optional[int] , __snake_case : Dict="shi-labs/oneformer_demo" ):
'''simple docstring'''
with open(hf_hub_download(__snake_case , __snake_case , repo_type='dataset' ) , 'r' ) as f:
UpperCAmelCase_ : Optional[Any] = json.load(__snake_case )
UpperCAmelCase_ : str = {}
UpperCAmelCase_ : str = []
UpperCAmelCase_ : List[Any] = []
for key, info in class_info.items():
UpperCAmelCase_ : Dict = info['name']
class_names.append(info['name'] )
if info["isthing"]:
thing_ids.append(int(__snake_case ) )
UpperCAmelCase_ : Tuple = thing_ids
UpperCAmelCase_ : Optional[Any] = class_names
return metadata
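# Rough shape of the returned metadata, with hypothetical ADE20K-style values:
# {"0": "wall", "1": "building", ..., "thing_ids": [7, 10, ...],
#  "class_names": ["wall", "building", ...]}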
class lowerCamelCase (unittest.TestCase ):
'''simple docstring'''
def __init__( self , _UpperCamelCase , _UpperCamelCase=7 , _UpperCamelCase=3 , _UpperCamelCase=3_0 , _UpperCamelCase=4_0_0 , _UpperCamelCase=None , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase=[0.5, 0.5, 0.5] , _UpperCamelCase=[0.5, 0.5, 0.5] , _UpperCamelCase=1_0 , _UpperCamelCase=False , _UpperCamelCase=2_5_5 , _UpperCamelCase="shi-labs/oneformer_demo" , _UpperCamelCase="ade20k_panoptic.json" , _UpperCamelCase=1_0 , ) -> Tuple:
UpperCAmelCase_ : Optional[int] = parent
UpperCAmelCase_ : int = batch_size
UpperCAmelCase_ : int = num_channels
UpperCAmelCase_ : int = min_resolution
UpperCAmelCase_ : Union[str, Any] = max_resolution
UpperCAmelCase_ : Optional[int] = do_resize
UpperCAmelCase_ : Any = {'shortest_edge': 3_2, 'longest_edge': 1_3_3_3} if size is None else size
UpperCAmelCase_ : Optional[int] = do_normalize
UpperCAmelCase_ : Optional[Any] = image_mean
UpperCAmelCase_ : List[str] = image_std
UpperCAmelCase_ : Union[str, Any] = class_info_file
UpperCAmelCase_ : str = prepare_metadata(_UpperCamelCase , _UpperCamelCase )
UpperCAmelCase_ : Any = num_text
UpperCAmelCase_ : int = repo_path
# for the post_process_functions
UpperCAmelCase_ : List[str] = 2
UpperCAmelCase_ : Dict = 1_0
UpperCAmelCase_ : Any = 1_0
UpperCAmelCase_ : int = 3
UpperCAmelCase_ : Union[str, Any] = 4
UpperCAmelCase_ : Optional[int] = num_labels
UpperCAmelCase_ : int = do_reduce_labels
UpperCAmelCase_ : Tuple = ignore_index
def __UpperCAmelCase ( self ) -> List[str]:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"num_labels": self.num_labels,
"do_reduce_labels": self.do_reduce_labels,
"ignore_index": self.ignore_index,
"class_info_file": self.class_info_file,
"metadata": self.metadata,
"num_text": self.num_text,
}
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase=False ) -> Dict:
if not batched:
UpperCAmelCase_ : Any = image_inputs[0]
if isinstance(_UpperCamelCase , Image.Image ):
UpperCAmelCase_ , UpperCAmelCase_ : int = image.size
else:
UpperCAmelCase_ , UpperCAmelCase_ : int = image.shape[1], image.shape[2]
if w < h:
UpperCAmelCase_ : List[str] = int(self.size['shortest_edge'] * h / w )
UpperCAmelCase_ : Optional[int] = self.size['shortest_edge']
elif w > h:
UpperCAmelCase_ : Optional[int] = self.size['shortest_edge']
UpperCAmelCase_ : Dict = int(self.size['shortest_edge'] * w / h )
else:
UpperCAmelCase_ : Optional[Any] = self.size['shortest_edge']
UpperCAmelCase_ : Dict = self.size['shortest_edge']
else:
UpperCAmelCase_ : List[str] = []
for image in image_inputs:
UpperCAmelCase_ , UpperCAmelCase_ : Dict = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
UpperCAmelCase_ : str = max(_UpperCamelCase , key=lambda _UpperCamelCase : item[0] )[0]
UpperCAmelCase_ : str = max(_UpperCamelCase , key=lambda _UpperCamelCase : item[1] )[1]
return expected_height, expected_width
def __UpperCAmelCase ( self ) -> Union[str, Any]:
return OneFormerForUniversalSegmentationOutput(
# +1 for null class
class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1) ) , masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width) ) , )
@require_torch
@require_vision
class lowerCamelCase (_snake_case , unittest.TestCase ):
'''simple docstring'''
_snake_case : str = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None
# only for test_image_processing_common.test_image_proc_to_json_string
_snake_case : Union[str, Any] = image_processing_class
def __UpperCAmelCase ( self ) -> Optional[int]:
UpperCAmelCase_ : List[str] = OneFormerImageProcessorTester(self )
@property
def __UpperCAmelCase ( self ) -> Dict:
return self.image_processing_tester.prepare_image_processor_dict()
def __UpperCAmelCase ( self ) -> int:
UpperCAmelCase_ : List[str] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_UpperCamelCase , 'image_mean' ) )
self.assertTrue(hasattr(_UpperCamelCase , 'image_std' ) )
self.assertTrue(hasattr(_UpperCamelCase , 'do_normalize' ) )
self.assertTrue(hasattr(_UpperCamelCase , 'do_resize' ) )
self.assertTrue(hasattr(_UpperCamelCase , 'size' ) )
self.assertTrue(hasattr(_UpperCamelCase , 'ignore_index' ) )
self.assertTrue(hasattr(_UpperCamelCase , 'class_info_file' ) )
self.assertTrue(hasattr(_UpperCamelCase , 'num_text' ) )
self.assertTrue(hasattr(_UpperCamelCase , 'repo_path' ) )
self.assertTrue(hasattr(_UpperCamelCase , 'metadata' ) )
self.assertTrue(hasattr(_UpperCamelCase , 'do_reduce_labels' ) )
def __UpperCAmelCase ( self ) -> Tuple:
pass
def __UpperCAmelCase ( self ) -> List[str]:
# Initialize image_processor
UpperCAmelCase_ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase_ : List[str] = prepare_image_inputs(self.image_processing_tester , equal_resolution=_UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCamelCase , Image.Image )
# Test not batched input
UpperCAmelCase_ : Union[str, Any] = image_processor(image_inputs[0] , ['semantic'] , return_tensors='pt' ).pixel_values
UpperCAmelCase_ , UpperCAmelCase_ : str = self.image_processing_tester.get_expected_values(_UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase_ , UpperCAmelCase_ : str = self.image_processing_tester.get_expected_values(_UpperCamelCase , batched=_UpperCamelCase )
UpperCAmelCase_ : Tuple = image_processor(
_UpperCamelCase , ['semantic'] * len(_UpperCamelCase ) , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def __UpperCAmelCase ( self ) -> Optional[Any]:
# Initialize image_processor
UpperCAmelCase_ : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCAmelCase_ : Dict = prepare_image_inputs(self.image_processing_tester , equal_resolution=_UpperCamelCase , numpify=_UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCamelCase , np.ndarray )
# Test not batched input
UpperCAmelCase_ : Optional[Any] = image_processor(image_inputs[0] , ['semantic'] , return_tensors='pt' ).pixel_values
UpperCAmelCase_ , UpperCAmelCase_ : List[str] = self.image_processing_tester.get_expected_values(_UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase_ , UpperCAmelCase_ : str = self.image_processing_tester.get_expected_values(_UpperCamelCase , batched=_UpperCamelCase )
UpperCAmelCase_ : List[Any] = image_processor(
_UpperCamelCase , ['semantic'] * len(_UpperCamelCase ) , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def __UpperCAmelCase ( self ) -> List[str]:
# Initialize image_processor
UpperCAmelCase_ : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCAmelCase_ : str = prepare_image_inputs(self.image_processing_tester , equal_resolution=_UpperCamelCase , torchify=_UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCamelCase , torch.Tensor )
# Test not batched input
UpperCAmelCase_ : Optional[int] = image_processor(image_inputs[0] , ['semantic'] , return_tensors='pt' ).pixel_values
UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = self.image_processing_tester.get_expected_values(_UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase_ , UpperCAmelCase_ : int = self.image_processing_tester.get_expected_values(_UpperCamelCase , batched=_UpperCamelCase )
UpperCAmelCase_ : Optional[Any] = image_processor(
_UpperCamelCase , ['semantic'] * len(_UpperCamelCase ) , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def __UpperCAmelCase ( self , _UpperCamelCase=False , _UpperCamelCase=False , _UpperCamelCase="np" ) -> Dict:
UpperCAmelCase_ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# prepare image and target
UpperCAmelCase_ : Optional[Any] = self.image_processing_tester.num_labels
UpperCAmelCase_ : Tuple = None
UpperCAmelCase_ : Union[str, Any] = None
UpperCAmelCase_ : Dict = prepare_image_inputs(self.image_processing_tester , equal_resolution=_UpperCamelCase )
if with_segmentation_maps:
UpperCAmelCase_ : Tuple = num_labels
if is_instance_map:
UpperCAmelCase_ : Dict = list(range(_UpperCamelCase ) ) * 2
UpperCAmelCase_ : Any = dict(enumerate(_UpperCamelCase ) )
UpperCAmelCase_ : Dict = [
np.random.randint(0 , high * 2 , (img.size[1], img.size[0]) ).astype(np.uinta ) for img in image_inputs
]
if segmentation_type == "pil":
UpperCAmelCase_ : Tuple = [Image.fromarray(_UpperCamelCase ) for annotation in annotations]
UpperCAmelCase_ : Optional[int] = image_processor(
_UpperCamelCase , ['semantic'] * len(_UpperCamelCase ) , _UpperCamelCase , return_tensors='pt' , instance_id_to_semantic_id=_UpperCamelCase , pad_and_return_pixel_mask=_UpperCamelCase , )
return inputs
def __UpperCAmelCase ( self ) -> Any:
pass
def __UpperCAmelCase ( self ) -> List[str]:
def common(_UpperCamelCase=False , _UpperCamelCase=None ):
UpperCAmelCase_ : int = self.comm_get_image_processor_inputs(
with_segmentation_maps=_UpperCamelCase , is_instance_map=_UpperCamelCase , segmentation_type=_UpperCamelCase )
UpperCAmelCase_ : Union[str, Any] = inputs['mask_labels']
UpperCAmelCase_ : Optional[Any] = inputs['class_labels']
UpperCAmelCase_ : Dict = inputs['pixel_values']
UpperCAmelCase_ : List[str] = inputs['text_inputs']
# check the batch_size
for mask_label, class_label, text_input in zip(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
self.assertEqual(mask_label.shape[0] , class_label.shape[0] )
                # this ensures padding has happened
self.assertEqual(mask_label.shape[1:] , pixel_values.shape[2:] )
self.assertEqual(len(_UpperCamelCase ) , self.image_processing_tester.num_text )
common()
common(is_instance_map=_UpperCamelCase )
common(is_instance_map=_UpperCamelCase , segmentation_type='pil' )
common(is_instance_map=_UpperCamelCase , segmentation_type='pil' )
def __UpperCAmelCase ( self ) -> Optional[Any]:
UpperCAmelCase_ : Tuple = np.zeros((2_0, 5_0) )
UpperCAmelCase_ : Optional[Any] = 1
UpperCAmelCase_ : str = 1
UpperCAmelCase_ : Union[str, Any] = 1
UpperCAmelCase_ : List[Any] = binary_mask_to_rle(_UpperCamelCase )
self.assertEqual(len(_UpperCamelCase ) , 4 )
self.assertEqual(rle[0] , 2_1 )
self.assertEqual(rle[1] , 4_5 )
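        # Hedged note: binary_mask_to_rle returns a flat
        # [start, length, start, length, ...] list of 1-indexed foreground
        # runs over the flattened mask; the three writes above were presumably
        # slice assignments upstream (e.g. mask[0, 20:] = 1), which is why a
        # single merged run of 45 pixels starting at position 21 is expected.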
def __UpperCAmelCase ( self ) -> int:
UpperCAmelCase_ : Optional[int] = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=7_7 , task_seq_length=7_7 , class_info_file='ade20k_panoptic.json' , num_text=self.image_processing_tester.num_text , repo_path='shi-labs/oneformer_demo' , )
UpperCAmelCase_ : Optional[int] = self.image_processing_tester.get_fake_oneformer_outputs()
        UpperCAmelCase_ : str = feature_extractor.post_process_semantic_segmentation(_UpperCamelCase )
self.assertEqual(len(_UpperCamelCase ) , self.image_processing_tester.batch_size )
self.assertEqual(
segmentation[0].shape , (
self.image_processing_tester.height,
self.image_processing_tester.width,
) , )
UpperCAmelCase_ : str = [(1, 4) for i in range(self.image_processing_tester.batch_size )]
        UpperCAmelCase_ : List[Any] = feature_extractor.post_process_semantic_segmentation(_UpperCamelCase , target_sizes=_UpperCamelCase )
self.assertEqual(segmentation[0].shape , target_sizes[0] )
def __UpperCAmelCase ( self ) -> Union[str, Any]:
UpperCAmelCase_ : Dict = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=7_7 , task_seq_length=7_7 , class_info_file='ade20k_panoptic.json' , num_text=self.image_processing_tester.num_text , repo_path='shi-labs/oneformer_demo' , )
UpperCAmelCase_ : Any = self.image_processing_tester.get_fake_oneformer_outputs()
UpperCAmelCase_ : Tuple = image_processor.post_process_instance_segmentation(_UpperCamelCase , threshold=0 )
self.assertTrue(len(_UpperCamelCase ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue('segmentation' in el )
self.assertTrue('segments_info' in el )
self.assertEqual(type(el['segments_info'] ) , _UpperCamelCase )
self.assertEqual(
el['segmentation'].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
def __UpperCAmelCase ( self ) -> List[str]:
UpperCAmelCase_ : List[Any] = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=7_7 , task_seq_length=7_7 , class_info_file='ade20k_panoptic.json' , num_text=self.image_processing_tester.num_text , repo_path='shi-labs/oneformer_demo' , )
UpperCAmelCase_ : Tuple = self.image_processing_tester.get_fake_oneformer_outputs()
UpperCAmelCase_ : Optional[Any] = image_processor.post_process_panoptic_segmentation(_UpperCamelCase , threshold=0 )
self.assertTrue(len(_UpperCamelCase ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue('segmentation' in el )
self.assertTrue('segments_info' in el )
self.assertEqual(type(el['segments_info'] ) , _UpperCamelCase )
self.assertEqual(
el['segmentation'].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
| 29 |
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
__UpperCAmelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
__UpperCAmelCase = '\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline\n >>> from diffusers.utils import load_image\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior.to("cuda")\n\n >>> prompt = "A red cartoon frog, 4k"\n >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)\n\n >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16\n ... )\n >>> pipe.to("cuda")\n\n >>> init_image = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/frog.png"\n ... )\n\n >>> image = pipe(\n ... image=init_image,\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... strength=0.2,\n ... ).images\n\n >>> image[0].save("red_frog.png")\n ```\n'
def lowercase__ ( __snake_case : List[str] , __snake_case : int , __snake_case : Tuple=8 ):
'''simple docstring'''
UpperCAmelCase_ : Dict = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
UpperCAmelCase_ : List[Any] = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
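# Worked example (hedged): with the default scale_factor=8 the function maps
# pixel sizes to sizes compatible with the movq latent grid, e.g.
# (768, 768) -> (96, 96) since 768 // 64 == 12 exactly, while
# (570, 512) -> (72, 64) because the remainder bumps 570 // 64 == 8 up to 9.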
def lowercase__ ( __snake_case : Any , __snake_case : int=512 , __snake_case : Dict=512 ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = pil_image.resize((w, h) , resample=Image.BICUBIC , reducing_gap=1 )
UpperCAmelCase_ : Dict = np.array(pil_image.convert('RGB' ) )
UpperCAmelCase_ : Any = arr.astype(np.floataa ) / 127.5 - 1
UpperCAmelCase_ : Dict = np.transpose(__snake_case , [2, 0, 1] )
UpperCAmelCase_ : List[str] = torch.from_numpy(__snake_case ).unsqueeze(0 )
return image
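# Hedged reading: prepare_image is meant to return a (1, 3, h, w) float
# tensor scaled to [-1, 1], the input range expected by the movq encoder.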
class lowerCamelCase (_snake_case ):
'''simple docstring'''
def __init__( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , ) -> Union[str, Any]:
super().__init__()
self.register_modules(
unet=_UpperCamelCase , scheduler=_UpperCamelCase , movq=_UpperCamelCase , )
UpperCAmelCase_ : Tuple = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Dict:
# get the original timestep using init_timestep
UpperCAmelCase_ : Any = min(int(num_inference_steps * strength ) , _UpperCamelCase )
UpperCAmelCase_ : List[Any] = max(num_inference_steps - init_timestep , 0 )
UpperCAmelCase_ : str = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
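    # Hedged example: with num_inference_steps=100 and strength=0.3 this keeps
    # only the last 30 timesteps (init_timestep=30, t_start=70), so a lower
    # strength preserves more of the input image.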
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=None ) -> Tuple:
if not isinstance(_UpperCamelCase , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(_UpperCamelCase )}" )
UpperCAmelCase_ : List[str] = image.to(device=_UpperCamelCase , dtype=_UpperCamelCase )
UpperCAmelCase_ : List[str] = batch_size * num_images_per_prompt
if image.shape[1] == 4:
UpperCAmelCase_ : List[str] = image
else:
if isinstance(_UpperCamelCase , _UpperCamelCase ) and len(_UpperCamelCase ) != batch_size:
raise ValueError(
f"You have passed a list of generators of length {len(_UpperCamelCase )}, but requested an effective batch"
f" size of {batch_size}. Make sure the batch size matches the length of the generators." )
elif isinstance(_UpperCamelCase , _UpperCamelCase ):
UpperCAmelCase_ : Any = [
self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(_UpperCamelCase )
]
UpperCAmelCase_ : Tuple = torch.cat(_UpperCamelCase , dim=0 )
else:
UpperCAmelCase_ : Union[str, Any] = self.movq.encode(_UpperCamelCase ).latent_dist.sample(_UpperCamelCase )
UpperCAmelCase_ : int = self.movq.config.scaling_factor * init_latents
UpperCAmelCase_ : Optional[int] = torch.cat([init_latents] , dim=0 )
UpperCAmelCase_ : Tuple = init_latents.shape
UpperCAmelCase_ : List[Any] = randn_tensor(_UpperCamelCase , generator=_UpperCamelCase , device=_UpperCamelCase , dtype=_UpperCamelCase )
# get latents
UpperCAmelCase_ : str = self.scheduler.add_noise(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
UpperCAmelCase_ : Union[str, Any] = init_latents
return latents
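    # Hedged summary: the init image is encoded with movq, scaled by the VQ
    # scaling factor, then noised to the chosen start timestep via
    # scheduler.add_noise, which is the standard img2img latent initialisation.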
def __UpperCAmelCase ( self , _UpperCamelCase=0 ) -> Any:
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`' )
UpperCAmelCase_ : Optional[Any] = torch.device(f"cuda:{gpu_id}" )
UpperCAmelCase_ : Optional[Any] = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(_UpperCamelCase , _UpperCamelCase )
def __UpperCAmelCase ( self , _UpperCamelCase=0 ) -> Union[str, Any]:
if is_accelerate_available() and is_accelerate_version('>=' , '0.17.0.dev0' ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.' )
UpperCAmelCase_ : str = torch.device(f"cuda:{gpu_id}" )
if self.device.type != "cpu":
self.to('cpu' , silence_dtype_warnings=_UpperCamelCase )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
UpperCAmelCase_ : Dict = None
for cpu_offloaded_model in [self.unet, self.movq]:
UpperCAmelCase_ , UpperCAmelCase_ : Dict = cpu_offload_with_hook(_UpperCamelCase , _UpperCamelCase , prev_module_hook=_UpperCamelCase )
# We'll offload the last model manually.
UpperCAmelCase_ : Any = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def __UpperCAmelCase ( self ) -> Dict:
if not hasattr(self.unet , '_hf_hook' ):
return self.device
for module in self.unet.modules():
if (
hasattr(_UpperCamelCase , '_hf_hook' )
and hasattr(module._hf_hook , 'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
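    # Hedged note: under accelerate's CPU offload, hooked modules expose
    # `_hf_hook.execution_device`; the property above surfaces it so new
    # tensors land on the device that will actually run the forward pass.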
@torch.no_grad()
@replace_example_docstring(_UpperCamelCase )
def __call__( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = 5_1_2 , _UpperCamelCase = 5_1_2 , _UpperCamelCase = 1_0_0 , _UpperCamelCase = 4.0 , _UpperCamelCase = 0.3 , _UpperCamelCase = 1 , _UpperCamelCase = None , _UpperCamelCase = "pil" , _UpperCamelCase = True , ) -> str:
UpperCAmelCase_ : Any = self._execution_device
UpperCAmelCase_ : Union[str, Any] = guidance_scale > 1.0
if isinstance(_UpperCamelCase , _UpperCamelCase ):
UpperCAmelCase_ : str = torch.cat(_UpperCamelCase , dim=0 )
UpperCAmelCase_ : Optional[Any] = image_embeds.shape[0]
if isinstance(_UpperCamelCase , _UpperCamelCase ):
UpperCAmelCase_ : Union[str, Any] = torch.cat(_UpperCamelCase , dim=0 )
if do_classifier_free_guidance:
UpperCAmelCase_ : int = image_embeds.repeat_interleave(_UpperCamelCase , dim=0 )
UpperCAmelCase_ : int = negative_image_embeds.repeat_interleave(_UpperCamelCase , dim=0 )
UpperCAmelCase_ : Optional[Any] = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=_UpperCamelCase )
if not isinstance(_UpperCamelCase , _UpperCamelCase ):
UpperCAmelCase_ : Tuple = [image]
if not all(isinstance(_UpperCamelCase , (PIL.Image.Image, torch.Tensor) ) for i in image ):
raise ValueError(
f"Input is in incorrect format: {[type(_UpperCamelCase ) for i in image]}. Currently, we only support PIL image and pytorch tensor" )
UpperCAmelCase_ : str = torch.cat([prepare_image(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) for i in image] , dim=0 )
UpperCAmelCase_ : Any = image.to(dtype=image_embeds.dtype , device=_UpperCamelCase )
UpperCAmelCase_ : List[str] = self.movq.encode(_UpperCamelCase )['latents']
UpperCAmelCase_ : List[Any] = latents.repeat_interleave(_UpperCamelCase , dim=0 )
self.scheduler.set_timesteps(_UpperCamelCase , device=_UpperCamelCase )
UpperCAmelCase_ , UpperCAmelCase_ : Any = self.get_timesteps(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
UpperCAmelCase_ : Optional[Any] = timesteps[:1].repeat(batch_size * num_images_per_prompt )
UpperCAmelCase_ , UpperCAmelCase_ : str = downscale_height_and_width(_UpperCamelCase , _UpperCamelCase , self.movq_scale_factor )
UpperCAmelCase_ : Dict = self.prepare_latents(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , image_embeds.dtype , _UpperCamelCase , _UpperCamelCase )
for i, t in enumerate(self.progress_bar(_UpperCamelCase ) ):
# expand the latents if we are doing classifier free guidance
UpperCAmelCase_ : Optional[Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
UpperCAmelCase_ : str = {'image_embeds': image_embeds}
UpperCAmelCase_ : Union[str, Any] = self.unet(
sample=_UpperCamelCase , timestep=_UpperCamelCase , encoder_hidden_states=_UpperCamelCase , added_cond_kwargs=_UpperCamelCase , return_dict=_UpperCamelCase , )[0]
if do_classifier_free_guidance:
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = noise_pred.split(latents.shape[1] , dim=1 )
UpperCAmelCase_ , UpperCAmelCase_ : str = noise_pred.chunk(2 )
UpperCAmelCase_ , UpperCAmelCase_ : str = variance_pred.chunk(2 )
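            # Classifier-free guidance: move from the unconditional prediction
            # towards the text-conditioned one, scaled by guidance_scale.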
UpperCAmelCase_ : Dict = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
UpperCAmelCase_ : Tuple = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , 'variance_type' )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
UpperCAmelCase_ , UpperCAmelCase_ : int = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
UpperCAmelCase_ : List[str] = self.scheduler.step(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , generator=_UpperCamelCase , )[0]
# post-processing
UpperCAmelCase_ : Optional[Any] = self.movq.decode(_UpperCamelCase , force_not_quantize=_UpperCamelCase )['sample']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}" )
if output_type in ["np", "pil"]:
UpperCAmelCase_ : List[str] = image * 0.5 + 0.5
UpperCAmelCase_ : List[Any] = image.clamp(0 , 1 )
UpperCAmelCase_ : Dict = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
UpperCAmelCase_ : List[Any] = self.numpy_to_pil(_UpperCamelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_UpperCamelCase )
| 29 | 1 |
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import SeqaSeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader
from transformers import MBartTokenizer, TaForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
ROUGE_KEYS,
LegacySeqaSeqDataset,
SeqaSeqDataset,
assert_all_frozen,
calculate_bleu,
calculate_rouge,
check_output_dir,
flatten_list,
freeze_embeds,
freeze_params,
get_git_info,
label_smoothed_nll_loss,
lmap,
pickle_save,
save_git_info,
save_json,
use_task_specific_params,
)
# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa
__UpperCAmelCase = logging.getLogger(__name__)
class lowerCamelCase (_snake_case ):
'''simple docstring'''
_snake_case : List[Any] = '''summarization'''
_snake_case : Optional[int] = ['''loss''']
_snake_case : str = ROUGE_KEYS
_snake_case : Tuple = '''rouge2'''
def __init__( self , _UpperCamelCase , **_UpperCamelCase ) -> List[str]:
if hparams.sortish_sampler and hparams.gpus > 1:
UpperCAmelCase_ : Union[str, Any] = False
elif hparams.max_tokens_per_batch is not None:
if hparams.gpus > 1:
raise NotImplementedError('Dynamic Batch size does not work for multi-gpu training' )
if hparams.sortish_sampler:
raise ValueError('--sortish_sampler and --max_tokens_per_batch may not be used simultaneously' )
super().__init__(_UpperCamelCase , num_labels=_UpperCamelCase , mode=self.mode , **_UpperCamelCase )
use_task_specific_params(self.model , 'summarization' )
save_git_info(self.hparams.output_dir )
UpperCAmelCase_ : Optional[int] = Path(self.output_dir ) / 'metrics.json'
UpperCAmelCase_ : str = Path(self.output_dir ) / 'hparams.pkl'
pickle_save(self.hparams , self.hparams_save_path )
UpperCAmelCase_ : int = 0
UpperCAmelCase_ : Optional[int] = defaultdict(_UpperCamelCase )
UpperCAmelCase_ : Union[str, Any] = self.config.model_type
UpperCAmelCase_ : Any = self.config.tgt_vocab_size if self.model_type == 'fsmt' else self.config.vocab_size
UpperCAmelCase_ : dict = {
"data_dir": self.hparams.data_dir,
"max_source_length": self.hparams.max_source_length,
"prefix": self.model.config.prefix or "",
}
UpperCAmelCase_ : Tuple = {
'train': self.hparams.n_train,
'val': self.hparams.n_val,
'test': self.hparams.n_test,
}
UpperCAmelCase_ : Union[str, Any] = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}
UpperCAmelCase_ : str = {
'train': self.hparams.max_target_length,
'val': self.hparams.val_max_target_length,
'test': self.hparams.test_max_target_length,
}
assert self.target_lens["train"] <= self.target_lens["val"], f"target_lens: {self.target_lens}"
assert self.target_lens["train"] <= self.target_lens["test"], f"target_lens: {self.target_lens}"
if self.hparams.freeze_embeds:
freeze_embeds(self.model )
if self.hparams.freeze_encoder:
freeze_params(self.model.get_encoder() )
assert_all_frozen(self.model.get_encoder() )
UpperCAmelCase_ : Any = get_git_info()['repo_sha']
UpperCAmelCase_ : int = hparams.num_workers
UpperCAmelCase_ : List[Any] = None # default to config
if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer , _UpperCamelCase ):
UpperCAmelCase_ : Dict = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
UpperCAmelCase_ : Dict = self.decoder_start_token_id
UpperCAmelCase_ : List[Any] = (
SeqaSeqDataset if hasattr(self.tokenizer , 'prepare_seq2seq_batch' ) else LegacySeqaSeqDataset
)
UpperCAmelCase_ : str = False
UpperCAmelCase_ : List[Any] = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
if self.hparams.eval_max_gen_length is not None:
UpperCAmelCase_ : Optional[int] = self.hparams.eval_max_gen_length
else:
UpperCAmelCase_ : Optional[Any] = self.model.config.max_length
UpperCAmelCase_ : List[str] = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric
def __UpperCAmelCase ( self , _UpperCamelCase ) -> Dict[str, List[str]]:
UpperCAmelCase_ : Optional[Any] = {
k: self.tokenizer.batch_decode(v.tolist() ) if 'mask' not in k else v.shape for k, v in batch.items()
}
save_json(_UpperCamelCase , Path(self.output_dir ) / 'text_batch.json' )
save_json({k: v.tolist() for k, v in batch.items()} , Path(self.output_dir ) / 'tok_batch.json' )
UpperCAmelCase_ : Union[str, Any] = True
return readable_batch
def __UpperCAmelCase ( self , _UpperCamelCase , **_UpperCamelCase ) -> List[str]:
return self.model(_UpperCamelCase , **_UpperCamelCase )
def __UpperCAmelCase ( self , _UpperCamelCase ) -> List[Any]:
UpperCAmelCase_ : Dict = self.tokenizer.batch_decode(
_UpperCamelCase , skip_special_tokens=_UpperCamelCase , clean_up_tokenization_spaces=_UpperCamelCase )
return lmap(str.strip , _UpperCamelCase )
def __UpperCAmelCase ( self , _UpperCamelCase ) -> Tuple:
UpperCAmelCase_ : int = self.tokenizer.pad_token_id
UpperCAmelCase_ , UpperCAmelCase_ : Dict = batch['input_ids'], batch['attention_mask']
UpperCAmelCase_ : int = batch['labels']
if isinstance(self.model , _UpperCamelCase ):
UpperCAmelCase_ : str = self.model._shift_right(_UpperCamelCase )
else:
UpperCAmelCase_ : str = shift_tokens_right(_UpperCamelCase , _UpperCamelCase )
if not self.already_saved_batch: # This would be slightly better if it only happened on rank zero
UpperCAmelCase_ : Any = decoder_input_ids
self.save_readable_batch(_UpperCamelCase )
UpperCAmelCase_ : List[Any] = self(_UpperCamelCase , attention_mask=_UpperCamelCase , decoder_input_ids=_UpperCamelCase , use_cache=_UpperCamelCase )
UpperCAmelCase_ : Tuple = outputs['logits']
if self.hparams.label_smoothing == 0:
# Same behavior as modeling_bart.py, besides ignoring pad_token_id
UpperCAmelCase_ : Any = nn.CrossEntropyLoss(ignore_index=_UpperCamelCase )
assert lm_logits.shape[-1] == self.vocab_size
UpperCAmelCase_ : Optional[int] = ce_loss_fct(lm_logits.view(-1 , lm_logits.shape[-1] ) , tgt_ids.view(-1 ) )
else:
UpperCAmelCase_ : Optional[Any] = nn.functional.log_softmax(_UpperCamelCase , dim=-1 )
UpperCAmelCase_ , UpperCAmelCase_ : str = label_smoothed_nll_loss(
_UpperCamelCase , _UpperCamelCase , self.hparams.label_smoothing , ignore_index=_UpperCamelCase )
return (loss,)
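    # Hedged note: with label smoothing enabled, label_smoothed_nll_loss over
    # log-softmax outputs replaces plain cross-entropy, softening the target
    # distribution to reduce over-confidence.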
@property
def __UpperCAmelCase ( self ) -> int:
return self.tokenizer.pad_token_id
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase ) -> Dict:
UpperCAmelCase_ : List[Any] = self._step(_UpperCamelCase )
UpperCAmelCase_ : Optional[Any] = dict(zip(self.loss_names , _UpperCamelCase ) )
# tokens per batch
UpperCAmelCase_ : Tuple = batch['input_ids'].ne(self.pad ).sum() + batch['labels'].ne(self.pad ).sum()
UpperCAmelCase_ : Dict = batch['input_ids'].shape[0]
UpperCAmelCase_ : Optional[int] = batch['input_ids'].eq(self.pad ).sum()
UpperCAmelCase_ : List[str] = batch['input_ids'].eq(self.pad ).float().mean()
# TODO(SS): make a wandb summary metric for this
return {"loss": loss_tensors[0], "log": logs}
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase ) -> Dict:
return self._generative_step(_UpperCamelCase )
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase="val" ) -> Dict:
self.step_count += 1
UpperCAmelCase_ : int = {k: torch.stack([x[k] for x in outputs] ).mean() for k in self.loss_names}
UpperCAmelCase_ : Optional[Any] = losses['loss']
UpperCAmelCase_ : Tuple = {
k: np.array([x[k] for x in outputs] ).mean() for k in self.metric_names + ['gen_time', 'gen_len']
}
UpperCAmelCase_ : Tuple = (
generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
)
UpperCAmelCase_ : torch.FloatTensor = torch.tensor(_UpperCamelCase ).type_as(_UpperCamelCase )
generative_metrics.update({k: v.item() for k, v in losses.items()} )
losses.update(_UpperCamelCase )
UpperCAmelCase_ : str = {f"{prefix}_avg_{k}": x for k, x in losses.items()}
UpperCAmelCase_ : Optional[int] = self.step_count
self.metrics[prefix].append(_UpperCamelCase ) # callback writes this to self.metrics_save_path
UpperCAmelCase_ : int = flatten_list([x['preds'] for x in outputs] )
return {
"log": all_metrics,
"preds": preds,
f"{prefix}_loss": loss,
f"{prefix}_{self.val_metric}": metric_tensor,
}
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase ) -> Dict:
return calculate_rouge(_UpperCamelCase , _UpperCamelCase )
def __UpperCAmelCase ( self , _UpperCamelCase ) -> dict:
UpperCAmelCase_ : Dict = time.time()
# parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
UpperCAmelCase_ : int = self.model.generate(
batch['input_ids'] , attention_mask=batch['attention_mask'] , use_cache=_UpperCamelCase , decoder_start_token_id=self.decoder_start_token_id , num_beams=self.eval_beams , max_length=self.eval_max_length , )
UpperCAmelCase_ : Optional[Any] = (time.time() - ta) / batch['input_ids'].shape[0]
UpperCAmelCase_ : List[str] = self.ids_to_clean_text(_UpperCamelCase )
UpperCAmelCase_ : List[str] = self.ids_to_clean_text(batch['labels'] )
UpperCAmelCase_ : str = self._step(_UpperCamelCase )
UpperCAmelCase_ : Optional[int] = dict(zip(self.loss_names , _UpperCamelCase ) )
UpperCAmelCase_ : Dict = self.calc_generative_metrics(_UpperCamelCase , _UpperCamelCase )
UpperCAmelCase_ : Union[str, Any] = np.mean(lmap(_UpperCamelCase , _UpperCamelCase ) )
base_metrics.update(gen_time=_UpperCamelCase , gen_len=_UpperCamelCase , preds=_UpperCamelCase , target=_UpperCamelCase , **_UpperCamelCase )
return base_metrics
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase ) -> int:
return self._generative_step(_UpperCamelCase )
def __UpperCAmelCase ( self , _UpperCamelCase ) -> Dict:
return self.validation_epoch_end(_UpperCamelCase , prefix='test' )
def __UpperCAmelCase ( self , _UpperCamelCase ) -> SeqaSeqDataset:
UpperCAmelCase_ : int = self.n_obs[type_path]
UpperCAmelCase_ : List[str] = self.target_lens[type_path]
UpperCAmelCase_ : List[str] = self.dataset_class(
self.tokenizer , type_path=_UpperCamelCase , n_obs=_UpperCamelCase , max_target_length=_UpperCamelCase , **self.dataset_kwargs , )
return dataset
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = False ) -> DataLoader:
UpperCAmelCase_ : List[str] = self.get_dataset(_UpperCamelCase )
if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
UpperCAmelCase_ : Optional[Any] = dataset.make_sortish_sampler(_UpperCamelCase , distributed=self.hparams.gpus > 1 )
return DataLoader(
_UpperCamelCase , batch_size=_UpperCamelCase , collate_fn=dataset.collate_fn , shuffle=_UpperCamelCase , num_workers=self.num_workers , sampler=_UpperCamelCase , )
elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
UpperCAmelCase_ : str = dataset.make_dynamic_sampler(
self.hparams.max_tokens_per_batch , distributed=self.hparams.gpus > 1 )
return DataLoader(
_UpperCamelCase , batch_sampler=_UpperCamelCase , collate_fn=dataset.collate_fn , num_workers=self.num_workers , )
else:
return DataLoader(
_UpperCamelCase , batch_size=_UpperCamelCase , collate_fn=dataset.collate_fn , shuffle=_UpperCamelCase , num_workers=self.num_workers , sampler=_UpperCamelCase , )
def __UpperCAmelCase ( self ) -> DataLoader:
UpperCAmelCase_ : List[Any] = self.get_dataloader('train' , batch_size=self.hparams.train_batch_size , shuffle=_UpperCamelCase )
return dataloader
def __UpperCAmelCase ( self ) -> DataLoader:
return self.get_dataloader('val' , batch_size=self.hparams.eval_batch_size )
def __UpperCAmelCase ( self ) -> DataLoader:
return self.get_dataloader('test' , batch_size=self.hparams.eval_batch_size )
@staticmethod
def __UpperCAmelCase ( _UpperCamelCase , _UpperCamelCase ) -> Dict:
BaseTransformer.add_model_specific_args(_UpperCamelCase , _UpperCamelCase )
add_generic_args(_UpperCamelCase , _UpperCamelCase )
parser.add_argument(
'--max_source_length' , default=1_0_2_4 , type=_UpperCamelCase , help=(
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
) , )
        parser.add_argument(
            '--max_target_length' , default=5_6 , type=_UpperCamelCase , help=(
                'The maximum total target sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            ) , )
        parser.add_argument(
            '--val_max_target_length' , default=1_4_2 , type=_UpperCamelCase , help=(
                'The maximum total target sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            ) , )
        parser.add_argument(
            '--test_max_target_length' , default=1_4_2 , type=_UpperCamelCase , help=(
                'The maximum total target sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            ) , )
parser.add_argument('--freeze_encoder' , action='store_true' )
parser.add_argument('--freeze_embeds' , action='store_true' )
parser.add_argument('--sortish_sampler' , action='store_true' , default=_UpperCamelCase )
parser.add_argument('--overwrite_output_dir' , action='store_true' , default=_UpperCamelCase )
parser.add_argument('--max_tokens_per_batch' , type=_UpperCamelCase , default=_UpperCamelCase )
parser.add_argument('--logger_name' , type=_UpperCamelCase , choices=['default', 'wandb', 'wandb_shared'] , default='default' )
parser.add_argument('--n_train' , type=_UpperCamelCase , default=-1 , required=_UpperCamelCase , help='# examples. -1 means use all.' )
parser.add_argument('--n_val' , type=_UpperCamelCase , default=5_0_0 , required=_UpperCamelCase , help='# examples. -1 means use all.' )
parser.add_argument('--n_test' , type=_UpperCamelCase , default=-1 , required=_UpperCamelCase , help='# examples. -1 means use all.' )
        parser.add_argument(
            '--task' , type=_UpperCamelCase , default='summarization' , required=_UpperCamelCase , help='Task to run, e.g. summarization or translation.' )
parser.add_argument('--label_smoothing' , type=_UpperCamelCase , default=0.0 , required=_UpperCamelCase )
parser.add_argument('--src_lang' , type=_UpperCamelCase , default='' , required=_UpperCamelCase )
parser.add_argument('--tgt_lang' , type=_UpperCamelCase , default='' , required=_UpperCamelCase )
parser.add_argument('--eval_beams' , type=_UpperCamelCase , default=_UpperCamelCase , required=_UpperCamelCase )
parser.add_argument(
'--val_metric' , type=_UpperCamelCase , default=_UpperCamelCase , required=_UpperCamelCase , choices=['bleu', 'rouge2', 'loss', None] )
parser.add_argument('--eval_max_gen_length' , type=_UpperCamelCase , default=_UpperCamelCase , help='never generate more than n tokens' )
parser.add_argument('--save_top_k' , type=_UpperCamelCase , default=1 , required=_UpperCamelCase , help='How many checkpoints to save' )
parser.add_argument(
'--early_stopping_patience' , type=_UpperCamelCase , default=-1 , required=_UpperCamelCase , help=(
'-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So'
' val_check_interval will effect it.'
) , )
return parser
class lowerCamelCase (_snake_case ):
'''simple docstring'''
_snake_case : int = '''translation'''
_snake_case : Dict = ['''loss''']
_snake_case : str = ['''bleu''']
_snake_case : str = '''bleu'''
def __init__( self , _UpperCamelCase , **_UpperCamelCase ) -> List[str]:
super().__init__(_UpperCamelCase , **_UpperCamelCase )
UpperCAmelCase_ : List[str] = hparams.src_lang
UpperCAmelCase_ : List[str] = hparams.tgt_lang
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase ) -> dict:
return calculate_bleu(_UpperCamelCase , _UpperCamelCase )
def lowercase__ ( __snake_case : Dict , __snake_case : List[Any]=None ):
'''simple docstring'''
Path(args.output_dir ).mkdir(exist_ok=__snake_case )
check_output_dir(__snake_case , expected_items=3 )
if model is None:
if "summarization" in args.task:
UpperCAmelCase_ : SummarizationModule = SummarizationModule(__snake_case )
else:
UpperCAmelCase_ : SummarizationModule = TranslationModule(__snake_case )
UpperCAmelCase_ : List[str] = Path(args.data_dir ).name
if (
args.logger_name == "default"
or args.fast_dev_run
or str(args.output_dir ).startswith('/tmp' )
or str(args.output_dir ).startswith('/var' )
):
UpperCAmelCase_ : Dict = True # don't pollute wandb logs unnecessarily
elif args.logger_name == "wandb":
from pytorch_lightning.loggers import WandbLogger
UpperCAmelCase_ : Optional[Any] = os.environ.get('WANDB_PROJECT' , __snake_case )
UpperCAmelCase_ : int = WandbLogger(name=model.output_dir.name , project=__snake_case )
elif args.logger_name == "wandb_shared":
from pytorch_lightning.loggers import WandbLogger
UpperCAmelCase_ : str = WandbLogger(name=model.output_dir.name , project=F"hf_{dataset}" )
if args.early_stopping_patience >= 0:
UpperCAmelCase_ : Any = get_early_stopping_callback(model.val_metric , args.early_stopping_patience )
else:
UpperCAmelCase_ : Dict = False
UpperCAmelCase_ : Dict = args.val_metric == 'loss'
UpperCAmelCase_ : pl.Trainer = generic_train(
__snake_case , __snake_case , logging_callback=SeqaSeqLoggingCallback() , checkpoint_callback=get_checkpoint_callback(
args.output_dir , model.val_metric , args.save_top_k , __snake_case ) , early_stopping_callback=__snake_case , logger=__snake_case , )
pickle_save(model.hparams , model.output_dir / 'hparams.pkl' )
if not args.do_predict:
return model
UpperCAmelCase_ : Tuple = ''
UpperCAmelCase_ : List[Any] = sorted(glob.glob(os.path.join(args.output_dir , '*.ckpt' ) , recursive=__snake_case ) )
if checkpoints:
UpperCAmelCase_ : int = checkpoints[-1]
UpperCAmelCase_ : Optional[Any] = checkpoints[-1]
trainer.logger.log_hyperparams(model.hparams )
# test() without a model tests using the best checkpoint automatically
trainer.test()
return model
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
__UpperCAmelCase = pl.Trainer.add_argparse_args(parser)
__UpperCAmelCase = SummarizationModule.add_model_specific_args(parser, os.getcwd())
__UpperCAmelCase = parser.parse_args()
main(args)
| 29 |
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def lowercase__ ( __snake_case : List[Any] , __snake_case : List[str]=False ):
'''simple docstring'''
try:
UpperCAmelCase_ : int = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
UpperCAmelCase_ : Optional[int] = default
else:
# KEY is set, convert it to True or False.
try:
UpperCAmelCase_ : List[Any] = strtobool(__snake_case )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(F"If set, {key} must be yes or no." )
return _value
__UpperCAmelCase = parse_flag_from_env('RUN_SLOW', default=False)
def lowercase__ ( __snake_case : int ):
'''simple docstring'''
return unittest.skip('Test was skipped' )(__snake_case )
def lowercase__ ( __snake_case : Tuple ):
'''simple docstring'''
return unittest.skipUnless(_run_slow_tests , 'test is slow' )(__snake_case )
def lowercase__ ( __snake_case : List[str] ):
'''simple docstring'''
return unittest.skipUnless(not torch.cuda.is_available() , 'test requires only a CPU' )(__snake_case )
def lowercase__ ( __snake_case : Tuple ):
'''simple docstring'''
return unittest.skipUnless(torch.cuda.is_available() , 'test requires a GPU' )(__snake_case )
def lowercase__ ( __snake_case : List[str] ):
'''simple docstring'''
    return unittest.skipUnless(is_xpu_available() , 'test requires an XPU' )(__snake_case )
def lowercase__ ( __snake_case : str ):
'''simple docstring'''
return unittest.skipUnless(is_mps_available() , 'test requires a `mps` backend support in `torch`' )(__snake_case )
def lowercase__ ( __snake_case : Tuple ):
'''simple docstring'''
return unittest.skipUnless(
is_transformers_available() and is_datasets_available() , 'test requires the Hugging Face suite' )(__snake_case )
def lowercase__ ( __snake_case : str ):
'''simple docstring'''
return unittest.skipUnless(is_bnb_available() , 'test requires the bitsandbytes library' )(__snake_case )
def lowercase__ ( __snake_case : Dict ):
'''simple docstring'''
return unittest.skipUnless(is_tpu_available() , 'test requires TPU' )(__snake_case )
def lowercase__ ( __snake_case : Tuple ):
'''simple docstring'''
return unittest.skipUnless(torch.cuda.device_count() == 1 , 'test requires a GPU' )(__snake_case )
def lowercase__ ( __snake_case : Dict ):
'''simple docstring'''
    return unittest.skipUnless(torch.xpu.device_count() == 1 , 'test requires an XPU' )(__snake_case )
def lowercase__ ( __snake_case : Optional[int] ):
'''simple docstring'''
return unittest.skipUnless(torch.cuda.device_count() > 1 , 'test requires multiple GPUs' )(__snake_case )
def lowercase__ ( __snake_case : int ):
'''simple docstring'''
return unittest.skipUnless(torch.xpu.device_count() > 1 , 'test requires multiple XPUs' )(__snake_case )
def lowercase__ ( __snake_case : Dict ):
'''simple docstring'''
return unittest.skipUnless(is_safetensors_available() , 'test requires safetensors' )(__snake_case )
def lowercase__ ( __snake_case : Tuple ):
'''simple docstring'''
return unittest.skipUnless(is_deepspeed_available() , 'test requires DeepSpeed' )(__snake_case )
def lowercase__ ( __snake_case : List[Any] ):
'''simple docstring'''
return unittest.skipUnless(is_torch_version('>=' , '1.12.0' ) , 'test requires torch version >= 1.12.0' )(__snake_case )
def lowercase__ ( __snake_case : Dict=None , __snake_case : Dict=None ):
'''simple docstring'''
if test_case is None:
return partial(__snake_case , version=__snake_case )
return unittest.skipUnless(is_torch_version('>=' , __snake_case ) , F"test requires torch version >= {version}" )(__snake_case )
def lowercase__ ( __snake_case : str ):
'''simple docstring'''
return unittest.skipUnless(is_tensorboard_available() , 'test requires Tensorboard' )(__snake_case )
def lowercase__ ( __snake_case : List[str] ):
'''simple docstring'''
return unittest.skipUnless(is_wandb_available() , 'test requires wandb' )(__snake_case )
def lowercase__ ( __snake_case : str ):
'''simple docstring'''
return unittest.skipUnless(is_comet_ml_available() , 'test requires comet_ml' )(__snake_case )
__UpperCAmelCase = (
any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)
def lowercase__ ( __snake_case : List[Any] ):
'''simple docstring'''
return unittest.skipUnless(
_atleast_one_tracker_available , 'test requires at least one tracker to be available and for `comet_ml` to not be installed' , )(__snake_case )
class lowerCamelCase (unittest.TestCase ):
'''simple docstring'''
_snake_case : Union[str, Any] = True
@classmethod
def __UpperCAmelCase ( cls ) -> Union[str, Any]:
UpperCAmelCase_ : List[Any] = tempfile.mkdtemp()
@classmethod
def __UpperCAmelCase ( cls ) -> List[str]:
if os.path.exists(cls.tmpdir ):
shutil.rmtree(cls.tmpdir )
def __UpperCAmelCase ( self ) -> str:
if self.clear_on_setup:
for path in Path(self.tmpdir ).glob('**/*' ):
if path.is_file():
path.unlink()
elif path.is_dir():
shutil.rmtree(_UpperCamelCase )
class lowerCamelCase (unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self ) -> Optional[int]:
super().tearDown()
# Reset the state of the AcceleratorState singleton.
AcceleratorState._reset_state()
PartialState._reset_state()
class lowerCamelCase (unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self , _UpperCamelCase ) -> Any:
UpperCAmelCase_ : List[Any] = mocks if isinstance(_UpperCamelCase , (tuple, list) ) else [mocks]
for m in self.mocks:
m.start()
self.addCleanup(m.stop )
def lowercase__ ( __snake_case : int ):
'''simple docstring'''
UpperCAmelCase_ : int = AcceleratorState()
UpperCAmelCase_ : str = tensor[None].clone().to(state.device )
UpperCAmelCase_ : List[str] = gather(__snake_case ).cpu()
UpperCAmelCase_ : List[Any] = tensor[0].cpu()
for i in range(tensors.shape[0] ):
if not torch.equal(tensors[i] , __snake_case ):
return False
return True
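# Hedged note (added): the helper above gathers one copy of the tensor per
# process and returns True only if every gathered slice equals the local
# process's value, i.e. the tensor is identical across all processes.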
class lowerCamelCase :
'''simple docstring'''
def __init__( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Any:
UpperCAmelCase_ : str = returncode
UpperCAmelCase_ : Optional[Any] = stdout
UpperCAmelCase_ : Optional[Any] = stderr
async def lowercase__ ( __snake_case : Optional[Any] , __snake_case : Optional[int] ):
'''simple docstring'''
while True:
UpperCAmelCase_ : Dict = await stream.readline()
if line:
callback(__snake_case )
else:
break
async def lowercase__ ( __snake_case : Optional[int] , __snake_case : Dict=None , __snake_case : str=None , __snake_case : Dict=None , __snake_case : List[str]=False , __snake_case : Optional[int]=False ):
'''simple docstring'''
if echo:
print('\nRunning: ' , ' '.join(__snake_case ) )
UpperCAmelCase_ : Optional[Any] = await asyncio.create_subprocess_exec(
cmd[0] , *cmd[1:] , stdin=__snake_case , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=__snake_case , )
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
    # If it starts hanging, we will need to switch to the following code. The problem is that no
    # data is seen until the process finishes, so if it hangs there is no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
UpperCAmelCase_ : Any = []
UpperCAmelCase_ : str = []
def tee(__snake_case : Dict , __snake_case : Union[str, Any] , __snake_case : Tuple , __snake_case : Optional[int]="" ):
UpperCAmelCase_ : List[str] = line.decode('utf-8' ).rstrip()
sink.append(__snake_case )
if not quiet:
print(__snake_case , __snake_case , file=__snake_case )
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
asyncio.create_task(_read_stream(p.stdout , lambda __snake_case : tee(__snake_case , __snake_case , sys.stdout , label='stdout:' ) ) ),
asyncio.create_task(_read_stream(p.stderr , lambda __snake_case : tee(__snake_case , __snake_case , sys.stderr , label='stderr:' ) ) ),
] , timeout=__snake_case , )
return _RunOutput(await p.wait() , __snake_case , __snake_case )
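# Hedged alternative sketch (added; reuses asyncio and the _RunOutput class
# referenced above): if the streaming reader ever deadlocks, `communicate()`
# buffers all output and returns only once the process exits, trading live
# logs for safety.
async def _stream_subprocess_buffered(cmd, env=None, stdin=None):
    p = await asyncio.create_subprocess_exec(
        cmd[0], *cmd[1:], stdin=stdin, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE, env=env
    )
    # decode once at the end instead of line by line
    out, err = await p.communicate()
    return _RunOutput(p.returncode, out.decode('utf-8').splitlines(), err.decode('utf-8').splitlines())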
def lowercase__ ( __snake_case : Optional[Any] , __snake_case : List[Any]=None , __snake_case : str=None , __snake_case : Tuple=180 , __snake_case : Dict=False , __snake_case : Optional[Any]=True ):
'''simple docstring'''
UpperCAmelCase_ : str = asyncio.get_event_loop()
UpperCAmelCase_ : int = loop.run_until_complete(
_stream_subprocess(__snake_case , env=__snake_case , stdin=__snake_case , timeout=__snake_case , quiet=__snake_case , echo=__snake_case ) )
UpperCAmelCase_ : int = ' '.join(__snake_case )
if result.returncode > 0:
UpperCAmelCase_ : int = '\n'.join(result.stderr )
raise RuntimeError(
F"'{cmd_str}' failed with returncode {result.returncode}\n\n"
F"The combined stderr from workers follows:\n{stderr}" )
return result
class lowerCamelCase (_snake_case ):
'''simple docstring'''
pass
def lowercase__ ( __snake_case : List[str] , __snake_case : List[Any]=False ):
'''simple docstring'''
try:
UpperCAmelCase_ : List[Any] = subprocess.check_output(__snake_case , stderr=subprocess.STDOUT )
if return_stdout:
if hasattr(__snake_case , 'decode' ):
UpperCAmelCase_ : str = output.decode('utf-8' )
return output
except subprocess.CalledProcessError as e:
raise SubprocessCallException(
F"Command `{' '.join(__snake_case )}` failed with the following error:\n\n{e.output.decode()}" ) from e
| 29 | 1 |
__UpperCAmelCase = 9.8_0_6_6_5
def lowercase__ ( __snake_case : float , __snake_case : float , __snake_case : float = g ):
'''simple docstring'''
if fluid_density <= 0:
raise ValueError('Impossible fluid density' )
if volume < 0:
raise ValueError('Impossible Object volume' )
if gravity <= 0:
raise ValueError('Impossible Gravity' )
return fluid_density * gravity * volume
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
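# Worked example (added; values are illustrative assumptions): a 0.5 m^3 body
# fully submerged in fresh water (~997 kg/m^3) under standard gravity displaces
# fluid weighing 997 * 9.80665 * 0.5 ≈ 4888.62 N, which is the buoyant force
# the function above returns for those arguments.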
| 29 |
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
__UpperCAmelCase = logging.getLogger(__name__)
def lowercase__ ( __snake_case : List[Any]=2 , __snake_case : Union[str, Any]=3 , __snake_case : Any=16 , __snake_case : int = 10 , __snake_case : int = 2 ):
'''simple docstring'''
def get_dataset(__snake_case : Optional[Any] ):
UpperCAmelCase_ : Optional[Any] = torch.randn(batch_size * n_batches , 1 )
return TensorDataset(__snake_case , a * x + b + 0.1 * torch.randn(batch_size * n_batches , 1 ) )
UpperCAmelCase_ : Any = get_dataset(__snake_case )
UpperCAmelCase_ : str = get_dataset(__snake_case )
UpperCAmelCase_ : int = DataLoader(__snake_case , shuffle=__snake_case , batch_size=__snake_case , num_workers=4 )
UpperCAmelCase_ : int = DataLoader(__snake_case , shuffle=__snake_case , batch_size=__snake_case , num_workers=4 )
return (train_dataloader, valid_dataloader)
def lowercase__ ( __snake_case : Optional[int] , __snake_case : str , __snake_case : Optional[int] , __snake_case : List[str] , __snake_case : Any , __snake_case : Tuple=None ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = []
for epoch in range(__snake_case ):
# Train quickly
model.train()
for batch in dataloader:
UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = batch
UpperCAmelCase_ : List[Any] = model(__snake_case )
UpperCAmelCase_ : int = torch.nn.functional.mse_loss(__snake_case , __snake_case )
accelerator.backward(__snake_case )
optimizer.step()
optimizer.zero_grad()
rands.append(random.random() ) # Introduce some randomness
if scheduler is not None:
scheduler.step()
return rands
class lowerCamelCase (nn.Module ):
'''simple docstring'''
def __init__( self ) -> Optional[Any]:
super().__init__()
UpperCAmelCase_ : List[Any] = nn.Parameter(torch.randn(1 ) )
UpperCAmelCase_ : Optional[int] = nn.Parameter(torch.randn(1 ) )
def __UpperCAmelCase ( self , _UpperCamelCase ) -> Optional[Any]:
return x * self.a + self.b
class lowerCamelCase (unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self ) -> Dict:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
UpperCAmelCase_ : Tuple = DummyModel()
UpperCAmelCase_ : List[str] = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = dummy_dataloaders()
UpperCAmelCase_ : Optional[int] = ProjectConfiguration(total_limit=1 , project_dir=_UpperCamelCase , automatic_checkpoint_naming=_UpperCamelCase )
# Train baseline
UpperCAmelCase_ : Dict = Accelerator(project_config=_UpperCamelCase )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : str = accelerator.prepare(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# Save initial
accelerator.save_state()
# Save second state
accelerator.save_state()
self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 )
def __UpperCAmelCase ( self ) -> int:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
UpperCAmelCase_ : Optional[Any] = DummyModel()
UpperCAmelCase_ : str = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = dummy_dataloaders()
# Train baseline
UpperCAmelCase_ : Tuple = Accelerator()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = accelerator.prepare(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# Save initial
UpperCAmelCase_ : Any = os.path.join(_UpperCamelCase , 'initial' )
accelerator.save_state(_UpperCamelCase )
((UpperCAmelCase_) , (UpperCAmelCase_)) : Optional[int] = model.a.item(), model.b.item()
UpperCAmelCase_ : Dict = optimizer.state_dict()
UpperCAmelCase_ : Union[str, Any] = train(3 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
((UpperCAmelCase_) , (UpperCAmelCase_)) : Union[str, Any] = model.a.item(), model.b.item()
UpperCAmelCase_ : Any = optimizer.state_dict()
# Train partially
set_seed(4_2 )
UpperCAmelCase_ : int = DummyModel()
UpperCAmelCase_ : int = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
UpperCAmelCase_ , UpperCAmelCase_ : str = dummy_dataloaders()
UpperCAmelCase_ : Optional[Any] = Accelerator()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Tuple = accelerator.prepare(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
accelerator.load_state(_UpperCamelCase )
((UpperCAmelCase_) , (UpperCAmelCase_)) : List[str] = model.a.item(), model.b.item()
UpperCAmelCase_ : Optional[Any] = optimizer.state_dict()
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
UpperCAmelCase_ : Dict = train(2 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# Save everything
UpperCAmelCase_ : Union[str, Any] = os.path.join(_UpperCamelCase , 'checkpoint' )
accelerator.save_state(_UpperCamelCase )
# Load everything back in and make sure all states work
accelerator.load_state(_UpperCamelCase )
test_rands += train(1 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
((UpperCAmelCase_) , (UpperCAmelCase_)) : Optional[Any] = model.a.item(), model.b.item()
UpperCAmelCase_ : Union[str, Any] = optimizer.state_dict()
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
def __UpperCAmelCase ( self ) -> int:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
UpperCAmelCase_ : Tuple = DummyModel()
UpperCAmelCase_ : Optional[int] = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = dummy_dataloaders()
UpperCAmelCase_ : Any = ProjectConfiguration(automatic_checkpoint_naming=_UpperCamelCase )
# Train baseline
UpperCAmelCase_ : str = Accelerator(project_dir=_UpperCamelCase , project_config=_UpperCamelCase )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Any = accelerator.prepare(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# Save initial
accelerator.save_state()
((UpperCAmelCase_) , (UpperCAmelCase_)) : Optional[int] = model.a.item(), model.b.item()
UpperCAmelCase_ : Optional[int] = optimizer.state_dict()
UpperCAmelCase_ : Optional[Any] = train(3 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
((UpperCAmelCase_) , (UpperCAmelCase_)) : Tuple = model.a.item(), model.b.item()
UpperCAmelCase_ : Optional[int] = optimizer.state_dict()
# Train partially
set_seed(4_2 )
UpperCAmelCase_ : Any = DummyModel()
UpperCAmelCase_ : Any = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = dummy_dataloaders()
UpperCAmelCase_ : Tuple = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=_UpperCamelCase )
UpperCAmelCase_ : List[Any] = Accelerator(project_dir=_UpperCamelCase , project_config=_UpperCamelCase )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Any = accelerator.prepare(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
accelerator.load_state(os.path.join(_UpperCamelCase , 'checkpoints' , 'checkpoint_0' ) )
((UpperCAmelCase_) , (UpperCAmelCase_)) : str = model.a.item(), model.b.item()
UpperCAmelCase_ : List[Any] = optimizer.state_dict()
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
UpperCAmelCase_ : Union[str, Any] = train(2 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# Save everything
accelerator.save_state()
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(_UpperCamelCase , 'checkpoints' , 'checkpoint_1' ) )
test_rands += train(1 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
((UpperCAmelCase_) , (UpperCAmelCase_)) : List[Any] = model.a.item(), model.b.item()
UpperCAmelCase_ : Dict = optimizer.state_dict()
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
def __UpperCAmelCase ( self ) -> Dict:
UpperCAmelCase_ : Optional[Any] = torch.tensor([1, 2, 3] )
UpperCAmelCase_ : Any = torch.tensor([2, 3, 4] )
UpperCAmelCase_ : Union[str, Any] = DummyModel()
UpperCAmelCase_ : List[str] = torch.optim.Adam(net.parameters() )
UpperCAmelCase_ : Any = Accelerator()
with self.assertRaises(_UpperCamelCase ) as ve:
accelerator.register_for_checkpointing(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
UpperCAmelCase_ : Optional[int] = str(ve.exception )
self.assertTrue('Item at index 0' in message )
self.assertTrue('Item at index 1' in message )
self.assertFalse('Item at index 2' in message )
self.assertFalse('Item at index 3' in message )
def __UpperCAmelCase ( self ) -> int:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
UpperCAmelCase_ : int = DummyModel()
UpperCAmelCase_ : Any = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
UpperCAmelCase_ : Dict = torch.optim.lr_scheduler.StepLR(_UpperCamelCase , step_size=1 , gamma=0.99 )
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = dummy_dataloaders()
UpperCAmelCase_ : Tuple = ProjectConfiguration(automatic_checkpoint_naming=_UpperCamelCase )
# Train baseline
UpperCAmelCase_ : Tuple = Accelerator(project_dir=_UpperCamelCase , project_config=_UpperCamelCase )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Any = accelerator.prepare(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# Save initial
accelerator.save_state()
UpperCAmelCase_ : Dict = scheduler.state_dict()
train(3 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
self.assertNotEqual(_UpperCamelCase , scheduler.state_dict() )
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(_UpperCamelCase , 'checkpoints' , 'checkpoint_0' ) )
self.assertEqual(_UpperCamelCase , scheduler.state_dict() )
def __UpperCAmelCase ( self ) -> Dict:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
UpperCAmelCase_ : Optional[int] = DummyModel()
UpperCAmelCase_ : Dict = ProjectConfiguration(automatic_checkpoint_naming=_UpperCamelCase , total_limit=2 )
# Train baseline
UpperCAmelCase_ : Optional[int] = Accelerator(project_dir=_UpperCamelCase , project_config=_UpperCamelCase )
UpperCAmelCase_ : str = accelerator.prepare(_UpperCamelCase )
            # Save 11 states; with total_limit=2 only the two most recent checkpoints survive:
for _ in range(1_1 ):
accelerator.save_state()
self.assertTrue(not os.path.exists(os.path.join(_UpperCamelCase , 'checkpoints' , 'checkpoint_0' ) ) )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase , 'checkpoints' , 'checkpoint_9' ) ) )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase , 'checkpoints' , 'checkpoint_10' ) ) )
@require_cuda
def __UpperCAmelCase ( self ) -> str:
UpperCAmelCase_ : List[str] = ['torchrun', f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__ )]
execute_subprocess_async(_UpperCamelCase , env=os.environ.copy() )
if __name__ == "__main__":
__UpperCAmelCase = '/tmp/accelerate/state_checkpointing'
__UpperCAmelCase = DummyModel()
__UpperCAmelCase = torch.optim.Adam(params=model.parameters(), lr=1E-3)
__UpperCAmelCase = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.9_9)
__UpperCAmelCase , __UpperCAmelCase = dummy_dataloaders()
__UpperCAmelCase = ProjectConfiguration(automatic_checkpoint_naming=True)
# Train baseline
__UpperCAmelCase = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision='no')
if accelerator.process_index == 0:
if os.path.exists(savedir):
shutil.rmtree(savedir)
os.makedirs(savedir)
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = accelerator.prepare(
model, optimizer, train_dataloader, valid_dataloader, scheduler
)
__UpperCAmelCase , __UpperCAmelCase = accelerator.prepare(model, optimizer)
train(3, model, train_dataloader, optimizer, accelerator, scheduler)
    # Check that the initial optimizer is loaded on the GPU
for group in optimizer.param_groups:
__UpperCAmelCase = group['params'][0].device
break
assert param_device.type == accelerator.device.type
__UpperCAmelCase = model.cpu()
accelerator.wait_for_everyone()
accelerator.save_state()
accelerator.wait_for_everyone()
# Check CPU state
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='cpu')
for group in optimizer.param_groups:
__UpperCAmelCase = group['params'][0].device
break
assert (
param_device.type == torch.device('cpu').type
), F"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
# Check device state
model.to(accelerator.device)
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='on_device')
for group in optimizer.param_groups:
__UpperCAmelCase = group['params'][0].device
break
assert (
param_device.type == accelerator.device.type
), F"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
# Check error
with pytest.raises(TypeError, match='Unsupported optimizer map location passed'):
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='invalid')
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
shutil.rmtree(savedir)
accelerator.wait_for_everyone()
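# Hedged launch note (added): the __main__ path above is what the CUDA test
# drives through torchrun; a manual multi-process run would look roughly like
#   torchrun --nproc_per_node=2 test_state_checkpointing.py
# (the file name is an assumption).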
| 29 | 1 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {'vocab_file': 'spiece.model'}
__UpperCAmelCase = {
'vocab_file': {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/spiece.model',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/spiece.model',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/spiece.model',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/spiece.model',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model',
}
}
__UpperCAmelCase = {
'albert-base-v1': 512,
'albert-large-v1': 512,
'albert-xlarge-v1': 512,
'albert-xxlarge-v1': 512,
'albert-base-v2': 512,
'albert-large-v2': 512,
'albert-xlarge-v2': 512,
'albert-xxlarge-v2': 512,
}
__UpperCAmelCase = '▁'
class lowerCamelCase (_snake_case ):
'''simple docstring'''
_snake_case : Union[str, Any] = VOCAB_FILES_NAMES
_snake_case : List[str] = PRETRAINED_VOCAB_FILES_MAP
_snake_case : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self , _UpperCamelCase , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase=False , _UpperCamelCase="[CLS]" , _UpperCamelCase="[SEP]" , _UpperCamelCase="<unk>" , _UpperCamelCase="[SEP]" , _UpperCamelCase="<pad>" , _UpperCamelCase="[CLS]" , _UpperCamelCase="[MASK]" , _UpperCamelCase = None , **_UpperCamelCase , ) -> None:
# Mask token behave like a normal word, i.e. include the space before it and
# is included in the raw text, there should be a match in a non-normalized sentence.
UpperCAmelCase_ : Dict = (
AddedToken(_UpperCamelCase , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase , normalized=_UpperCamelCase )
if isinstance(_UpperCamelCase , _UpperCamelCase )
else mask_token
)
UpperCAmelCase_ : Tuple = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=_UpperCamelCase , remove_space=_UpperCamelCase , keep_accents=_UpperCamelCase , bos_token=_UpperCamelCase , eos_token=_UpperCamelCase , unk_token=_UpperCamelCase , sep_token=_UpperCamelCase , pad_token=_UpperCamelCase , cls_token=_UpperCamelCase , mask_token=_UpperCamelCase , sp_model_kwargs=self.sp_model_kwargs , **_UpperCamelCase , )
UpperCAmelCase_ : List[str] = do_lower_case
UpperCAmelCase_ : Optional[Any] = remove_space
UpperCAmelCase_ : Union[str, Any] = keep_accents
UpperCAmelCase_ : Any = vocab_file
UpperCAmelCase_ : str = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_UpperCamelCase )
@property
def __UpperCAmelCase ( self ) -> Union[str, Any]:
return len(self.sp_model )
def __UpperCAmelCase ( self ) -> Optional[Any]:
UpperCAmelCase_ : Optional[Any] = {self.convert_ids_to_tokens(_UpperCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> Union[str, Any]:
UpperCAmelCase_ : List[str] = self.__dict__.copy()
UpperCAmelCase_ : Any = None
return state
def __setstate__( self , _UpperCamelCase ) -> Union[str, Any]:
UpperCAmelCase_ : Optional[Any] = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
UpperCAmelCase_ : Dict = {}
UpperCAmelCase_ : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __UpperCAmelCase ( self , _UpperCamelCase ) -> List[str]:
if self.remove_space:
UpperCAmelCase_ : Tuple = ' '.join(inputs.strip().split() )
else:
UpperCAmelCase_ : List[Any] = inputs
UpperCAmelCase_ : int = outputs.replace('``' , '"' ).replace('\'\'' , '"' )
if not self.keep_accents:
UpperCAmelCase_ : int = unicodedata.normalize('NFKD' , _UpperCamelCase )
UpperCAmelCase_ : str = ''.join([c for c in outputs if not unicodedata.combining(_UpperCamelCase )] )
if self.do_lower_case:
UpperCAmelCase_ : Optional[int] = outputs.lower()
return outputs
def __UpperCAmelCase ( self , _UpperCamelCase ) -> List[str]:
UpperCAmelCase_ : Any = self.preprocess_text(_UpperCamelCase )
UpperCAmelCase_ : int = self.sp_model.encode(_UpperCamelCase , out_type=_UpperCamelCase )
UpperCAmelCase_ : Any = []
for piece in pieces:
if len(_UpperCamelCase ) > 1 and piece[-1] == str(',' ) and piece[-2].isdigit():
UpperCAmelCase_ : List[str] = self.sp_model.EncodeAsPieces(piece[:-1].replace(_UpperCamelCase , '' ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
UpperCAmelCase_ : Union[str, Any] = cur_pieces[1:]
else:
UpperCAmelCase_ : str = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(_UpperCamelCase )
else:
new_pieces.append(_UpperCamelCase )
return new_pieces
def __UpperCAmelCase ( self , _UpperCamelCase ) -> Any:
return self.sp_model.PieceToId(_UpperCamelCase )
def __UpperCAmelCase ( self , _UpperCamelCase ) -> Tuple:
return self.sp_model.IdToPiece(_UpperCamelCase )
def __UpperCAmelCase ( self , _UpperCamelCase ) -> Optional[Any]:
UpperCAmelCase_ : Any = []
UpperCAmelCase_ : List[Any] = ''
UpperCAmelCase_ : List[str] = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(_UpperCamelCase ) + token
UpperCAmelCase_ : Tuple = True
UpperCAmelCase_ : Optional[int] = []
else:
current_sub_tokens.append(_UpperCamelCase )
UpperCAmelCase_ : Optional[Any] = False
out_string += self.sp_model.decode(_UpperCamelCase )
return out_string.strip()
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = None ) -> List[int]:
UpperCAmelCase_ : Tuple = [self.sep_token_id]
UpperCAmelCase_ : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = None , _UpperCamelCase = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_UpperCamelCase , token_ids_a=_UpperCamelCase , already_has_special_tokens=_UpperCamelCase )
if token_ids_a is not None:
return [1] + ([0] * len(_UpperCamelCase )) + [1] + ([0] * len(_UpperCamelCase )) + [1]
return [1] + ([0] * len(_UpperCamelCase )) + [1]
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = None ) -> List[int]:
UpperCAmelCase_ : int = [self.sep_token_id]
UpperCAmelCase_ : List[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = None ) -> Tuple[str]:
if not os.path.isdir(_UpperCamelCase ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
UpperCAmelCase_ : int = os.path.join(
_UpperCamelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _UpperCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(_UpperCamelCase , 'wb' ) as fi:
UpperCAmelCase_ : Tuple = self.sp_model.serialized_model_proto()
fi.write(_UpperCamelCase )
return (out_vocab_file,)
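# Hedged usage sketch (added; the checkpoint name comes from the pretrained map
# above, and the tokenizer class is bound to an obfuscated name in this file):
# tok = AlbertTokenizer.from_pretrained('albert-base-v2')
# tok.tokenize('hello world')  # SentencePiece pieces prefixed with '▁'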
| 29 |
import warnings
from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor
__UpperCAmelCase = logging.get_logger(__name__)
class lowerCamelCase (_snake_case ):
'''simple docstring'''
def __init__( self , *_UpperCamelCase , **_UpperCamelCase ) -> None:
warnings.warn(
'The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use ImageGPTImageProcessor instead.' , _UpperCamelCase , )
super().__init__(*_UpperCamelCase , **_UpperCamelCase )
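# Hedged usage note (added): instantiating the shim above emits the deprecation
# warning and then behaves exactly like ImageGPTImageProcessor, since __init__
# simply delegates to the parent class:
# extractor = ImageGPTFeatureExtractor()  # warns once, then a normal processor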
| 29 | 1 |
def lowercase__ ( __snake_case : Dict ):
'''simple docstring'''
if not head:
return True
# split the list to two parts
UpperCAmelCase_ , UpperCAmelCase_ : Any = head.next, head
while fast and fast.next:
UpperCAmelCase_ : str = fast.next.next
UpperCAmelCase_ : Union[str, Any] = slow.next
UpperCAmelCase_ : int = slow.next
    UpperCAmelCase_ : List[Any] = None  # detach the first half from the second (the comparison below also works without this)
# reverse the second part
UpperCAmelCase_ : Tuple = None
while second:
UpperCAmelCase_ : int = second.next
UpperCAmelCase_ : Any = node
UpperCAmelCase_ : Optional[Any] = second
UpperCAmelCase_ : Tuple = nxt
# compare two parts
    # the second part has the same number of nodes or one fewer
while node:
if node.val != head.val:
return False
UpperCAmelCase_ : Optional[Any] = node.next
UpperCAmelCase_ : Dict = head.next
return True
def lowercase__ ( __snake_case : Union[str, Any] ):
'''simple docstring'''
if not head or not head.next:
return True
# 1. Get the midpoint (slow)
    UpperCAmelCase_ = UpperCAmelCase_ = UpperCAmelCase_ = head  # slow, fast and cur all start at head
while fast and fast.next:
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = fast.next.next, slow.next
# 2. Push the second half into the stack
UpperCAmelCase_ : List[str] = [slow.val]
while slow.next:
UpperCAmelCase_ : List[str] = slow.next
stack.append(slow.val )
# 3. Comparison
while stack:
if stack.pop() != cur.val:
return False
UpperCAmelCase_ : int = cur.next
return True
def lowercase__ ( __snake_case : Dict ):
'''simple docstring'''
if not head or not head.next:
return True
UpperCAmelCase_ : Tuple = {}
UpperCAmelCase_ : int = 0
while head:
if head.val in d:
d[head.val].append(__snake_case )
else:
UpperCAmelCase_ : List[Any] = [pos]
UpperCAmelCase_ : Any = head.next
pos += 1
UpperCAmelCase_ : Dict = pos - 1
UpperCAmelCase_ : Optional[int] = 0
for v in d.values():
if len(__snake_case ) % 2 != 0:
middle += 1
else:
UpperCAmelCase_ : int = 0
for i in range(0 , len(__snake_case ) ):
if v[i] + v[len(__snake_case ) - 1 - step] != checksum:
return False
step += 1
if middle > 1:
return False
return True
| 29 |
| 29 | 1 |
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speechta import SpeechTaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
__UpperCAmelCase = get_tests_dir('fixtures/test_sentencepiece_bpe_char.model')
@require_sentencepiece
@require_tokenizers
class lowerCamelCase (_snake_case , unittest.TestCase ):
'''simple docstring'''
_snake_case : int = SpeechTaTokenizer
_snake_case : int = False
_snake_case : Dict = True
def __UpperCAmelCase ( self ) -> str:
super().setUp()
# We have a SentencePiece fixture for testing
UpperCAmelCase_ : Optional[Any] = SpeechTaTokenizer(_UpperCamelCase )
UpperCAmelCase_ : List[Any] = AddedToken('<mask>' , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase )
UpperCAmelCase_ : Optional[int] = mask_token
tokenizer.add_special_tokens({'mask_token': mask_token} )
tokenizer.add_tokens(['<ctc_blank>'] )
tokenizer.save_pretrained(self.tmpdirname )
def __UpperCAmelCase ( self , _UpperCamelCase ) -> int:
UpperCAmelCase_ : Dict = 'this is a test'
UpperCAmelCase_ : Optional[Any] = 'this is a test'
return input_text, output_text
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase=False , _UpperCamelCase=2_0 , _UpperCamelCase=5 ) -> Optional[int]:
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = self.get_input_output_texts(_UpperCamelCase )
UpperCAmelCase_ : Optional[Any] = tokenizer.encode(_UpperCamelCase , add_special_tokens=_UpperCamelCase )
UpperCAmelCase_ : str = tokenizer.decode(_UpperCamelCase , clean_up_tokenization_spaces=_UpperCamelCase )
return text, ids
def __UpperCAmelCase ( self ) -> Any:
UpperCAmelCase_ : Optional[int] = '<pad>'
UpperCAmelCase_ : Any = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_UpperCamelCase ) , _UpperCamelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_UpperCamelCase ) , _UpperCamelCase )
def __UpperCAmelCase ( self ) -> Any:
UpperCAmelCase_ : Optional[int] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<s>' )
self.assertEqual(vocab_keys[1] , '<pad>' )
self.assertEqual(vocab_keys[-4] , 'œ' )
self.assertEqual(vocab_keys[-2] , '<mask>' )
self.assertEqual(vocab_keys[-1] , '<ctc_blank>' )
self.assertEqual(len(_UpperCamelCase ) , 8_1 )
def __UpperCAmelCase ( self ) -> Union[str, Any]:
self.assertEqual(self.get_tokenizer().vocab_size , 7_9 )
def __UpperCAmelCase ( self ) -> int:
UpperCAmelCase_ : Optional[int] = self.get_tokenizers(do_lower_case=_UpperCamelCase )
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
UpperCAmelCase_ : int = tokenizer.vocab_size
UpperCAmelCase_ : int = len(_UpperCamelCase )
self.assertNotEqual(_UpperCamelCase , 0 )
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
UpperCAmelCase_ : List[str] = ['aaaaa bbbbbb', 'cccccccccdddddddd']
UpperCAmelCase_ : Dict = tokenizer.add_tokens(_UpperCamelCase )
UpperCAmelCase_ : Optional[Any] = tokenizer.vocab_size
UpperCAmelCase_ : Any = len(_UpperCamelCase )
self.assertNotEqual(_UpperCamelCase , 0 )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
self.assertEqual(_UpperCamelCase , len(_UpperCamelCase ) )
self.assertEqual(_UpperCamelCase , all_size + len(_UpperCamelCase ) )
UpperCAmelCase_ : Dict = tokenizer.encode('aaaaa bbbbbb low cccccccccdddddddd l' , add_special_tokens=_UpperCamelCase )
self.assertGreaterEqual(len(_UpperCamelCase ) , 4 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
UpperCAmelCase_ : Tuple = {'eos_token': '>>>>|||<||<<|<<', 'pad_token': '<<<<<|||>|>>>>|>'}
UpperCAmelCase_ : Tuple = tokenizer.add_special_tokens(_UpperCamelCase )
UpperCAmelCase_ : Dict = tokenizer.vocab_size
UpperCAmelCase_ : List[str] = len(_UpperCamelCase )
self.assertNotEqual(_UpperCamelCase , 0 )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
self.assertEqual(_UpperCamelCase , len(_UpperCamelCase ) )
self.assertEqual(_UpperCamelCase , all_size_a + len(_UpperCamelCase ) )
UpperCAmelCase_ : Any = tokenizer.encode(
'>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l' , add_special_tokens=_UpperCamelCase )
self.assertGreaterEqual(len(_UpperCamelCase ) , 6 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[0] , tokens[1] )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokens[-4] )
self.assertEqual(tokens[0] , tokenizer.eos_token_id )
self.assertEqual(tokens[-3] , tokenizer.pad_token_id )
def __UpperCAmelCase ( self ) -> Any:
pass
def __UpperCAmelCase ( self ) -> str:
pass
def __UpperCAmelCase ( self ) -> List[Any]:
UpperCAmelCase_ : str = self.get_tokenizer()
UpperCAmelCase_ : Optional[int] = tokenizer.tokenize('This is a test' )
# fmt: off
self.assertListEqual(_UpperCamelCase , [SPIECE_UNDERLINE, 'T', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'a', SPIECE_UNDERLINE, 't', 'e', 's', 't'] )
# fmt: on
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_UpperCamelCase ) , [4, 3_2, 1_1, 1_0, 1_2, 4, 1_0, 1_2, 4, 7, 4, 6, 5, 1_2, 6] , )
UpperCAmelCase_ : Optional[Any] = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
_UpperCamelCase , [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '92000', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'] )
UpperCAmelCase_ : Any = tokenizer.convert_tokens_to_ids(_UpperCamelCase )
# fmt: off
self.assertListEqual(_UpperCamelCase , [4, 3_0, 4, 2_0, 7, 1_2, 4, 2_5, 8, 1_3, 9, 4, 1_0, 9, 4, 3, 2_3, 4, 7, 9, 1_4, 4, 6, 1_1, 1_0, 1_2, 4, 1_0, 1_2, 4, 1_9, 7, 1_5, 1_2, 7_3, 2_6] )
# fmt: on
UpperCAmelCase_ : Union[str, Any] = tokenizer.convert_ids_to_tokens(_UpperCamelCase )
self.assertListEqual(
_UpperCamelCase , [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '<unk>', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'] )
@slow
def __UpperCAmelCase ( self ) -> Union[str, Any]:
# Use custom sequence because this tokenizer does not handle numbers.
UpperCAmelCase_ : Optional[Any] = [
'Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides '
'general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural '
'Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained '
'models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.',
'BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly '
'conditioning on both left and right context in all layers.',
'The quick brown fox jumps over the lazy dog.',
]
# fmt: off
UpperCAmelCase_ : Dict = {
'input_ids': [
[4, 3_2, 1_3, 7, 9, 1_2, 1_9, 8, 1_3, 1_8, 5, 1_3, 1_2, 4, 6_4, 1_9, 8, 1_3, 1_8, 5, 1_3, 1_5, 2_2, 4, 2_8, 9, 8, 2_0, 9, 4, 7, 1_2, 4, 2_4, 2_2, 6, 8, 1_3, 1_7, 1_1, 3_9, 6, 1_3, 7, 9, 1_2, 1_9, 8, 1_3, 1_8, 5, 1_3, 1_2, 4, 7, 9, 1_4, 4, 2_4, 2_2, 6, 8, 1_3, 1_7, 1_1, 3_9, 2_4, 1_3, 5, 6, 1_3, 7, 1_0, 9, 5, 1_4, 3_9, 2_5, 5, 1_3, 6, 6_3, 4, 2_4, 1_3, 8, 2_7, 1_0, 1_4, 5, 1_2, 4, 2_1, 5, 9, 5, 1_3, 7, 1_5, 3_9, 2_4, 1_6, 1_3, 2_4, 8, 1_2, 5, 4, 7, 1_3, 1_7, 1_1, 1_0, 6, 5, 1_7, 6, 1_6, 1_3, 5, 1_2, 4, 6_4, 4_0, 4_7, 5_4, 3_2, 2_3, 4, 5_3, 4_9, 3_2, 2_3, 4, 5_4, 8, 4_0, 4_7, 5_4, 3_2, 7, 2_3, 4, 6_9, 5_2, 4_3, 2_3, 4, 5_1, 1_0, 1_2, 6, 1_0, 1_5, 4_0, 5, 1_3, 6, 2_3, 4, 6_9, 5_2, 4_8, 5, 6, 2_6, 2_6, 2_6, 6_3, 4, 1_9, 8, 1_3, 4, 4_8, 7, 6, 1_6, 1_3, 7, 1_5, 4, 5_2, 7, 9, 2_1, 1_6, 7, 2_1, 5, 4, 6_1, 9, 1_4, 5, 1_3, 1_2, 6, 7, 9, 1_4, 1_0, 9, 2_1, 4, 6_4, 4_8, 5_2, 6_1, 6_3, 4, 7, 9, 1_4, 4, 4_8, 7, 6, 1_6, 1_3, 7, 1_5, 4, 5_2, 7, 9, 2_1, 1_6, 7, 2_1, 5, 4, 5_3, 5, 9, 5, 1_3, 7, 6, 1_0, 8, 9, 4, 6_4, 4_8, 5_2, 5_3, 6_3, 4, 2_0, 1_0, 6, 1_1, 4, 8, 2_7, 5, 1_3, 4, 6, 1_1, 1_0, 1_3, 6, 2_2, 3_9, 6, 2_0, 8, 4, 2_4, 1_3, 5, 6, 1_3, 7, 1_0, 9, 5, 1_4, 4, 1_8, 8, 1_4, 5, 1_5, 1_2, 4, 1_0, 9, 4, 8, 9, 5, 4, 1_1, 1_6, 9, 1_4, 1_3, 5, 1_4, 4, 2_4, 1_5, 1_6, 1_2, 4, 1_5, 7, 9, 2_1, 1_6, 7, 2_1, 5, 1_2, 4, 7, 9, 1_4, 4, 1_4, 5, 5, 2_4, 4, 1_0, 9, 6, 5, 1_3, 8, 2_4, 5, 1_3, 7, 2_5, 1_0, 1_5, 1_0, 6, 2_2, 4, 2_5, 5, 6, 2_0, 5, 5, 9, 4, 5_8, 7, 3_7, 2_3, 4, 4_9, 2_2, 3_2, 8, 1_3, 1_7, 1_1, 4, 7, 9, 1_4, 4, 3_2, 5, 9, 1_2, 8, 1_3, 5_5, 1_5, 8, 2_0, 2_6, 2],
[4, 4_0, 4_7, 5_4, 3_2, 4, 1_0, 1_2, 4, 1_4, 5, 1_2, 1_0, 2_1, 9, 5, 1_4, 4, 6, 8, 4, 2_4, 1_3, 5, 3_9, 6, 1_3, 7, 1_0, 9, 4, 1_4, 5, 5, 2_4, 4, 2_5, 1_0, 1_4, 1_0, 1_3, 5, 1_7, 6, 1_0, 8, 9, 7, 1_5, 4, 1_3, 5, 2_4, 1_3, 5, 1_2, 5, 9, 6, 7, 6, 1_0, 8, 9, 1_2, 4, 1_9, 1_3, 8, 1_8, 4, 1_6, 9, 1_5, 7, 2_5, 5, 1_5, 5, 1_4, 4, 6, 5, 3_7, 6, 4, 2_5, 2_2, 4, 4_6, 8, 1_0, 9, 6, 1_5, 2_2, 4, 1_7, 8, 9, 1_4, 1_0, 6, 1_0, 8, 9, 1_0, 9, 2_1, 4, 8, 9, 4, 2_5, 8, 6, 1_1, 4, 1_5, 5, 1_9, 6, 4, 7, 9, 1_4, 4, 1_3, 1_0, 2_1, 1_1, 6, 4, 1_7, 8, 9, 6, 5, 3_7, 6, 4, 1_0, 9, 4, 7, 1_5, 1_5, 4, 1_5, 7, 2_2, 5, 1_3, 1_2, 2_6, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 3_2, 1_1, 5, 4, 4_5, 1_6, 1_0, 1_7, 2_8, 4, 2_5, 1_3, 8, 2_0, 9, 4, 1_9, 8, 3_7, 4, 4_6, 1_6, 1_8, 2_4, 1_2, 4, 8, 2_7, 5, 1_3, 4, 6, 1_1, 5, 4, 1_5, 7, 5_7, 2_2, 4, 1_4, 8, 2_1, 2_6, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
'attention_mask': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_UpperCamelCase , model_name='microsoft/speecht5_asr' , revision='c5ef64c71905caeccde0e4462ef3f9077224c524' , sequences=_UpperCamelCase , )
| 29 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__UpperCAmelCase = {'configuration_vit_msn': ['VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTMSNConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST',
'ViTMSNModel',
'ViTMSNForImageClassification',
'ViTMSNPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
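# Hedged usage note (added): with the _LazyModule above, a name is resolved on
# first attribute access rather than at import time, e.g.
#   from transformers.models.vit_msn import ViTMSNConfig
# only loads configuration_vit_msn at that point.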
| 29 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {'ctrl': 'https://huggingface.co/ctrl/resolve/main/config.json'}
class lowerCamelCase (_snake_case ):
'''simple docstring'''
_snake_case : Optional[int] = '''ctrl'''
_snake_case : Tuple = ['''past_key_values''']
_snake_case : Tuple = {
'''max_position_embeddings''': '''n_positions''',
'''hidden_size''': '''n_embd''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self , _UpperCamelCase=2_4_6_5_3_4 , _UpperCamelCase=2_5_6 , _UpperCamelCase=1_2_8_0 , _UpperCamelCase=8_1_9_2 , _UpperCamelCase=4_8 , _UpperCamelCase=1_6 , _UpperCamelCase=0.1 , _UpperCamelCase=0.1 , _UpperCamelCase=1E-6 , _UpperCamelCase=0.02 , _UpperCamelCase=True , **_UpperCamelCase , ) -> Union[str, Any]:
UpperCAmelCase_ : Union[str, Any] = vocab_size
UpperCAmelCase_ : Dict = n_positions
UpperCAmelCase_ : List[str] = n_embd
UpperCAmelCase_ : Optional[Any] = n_layer
UpperCAmelCase_ : List[Any] = n_head
UpperCAmelCase_ : Any = dff
UpperCAmelCase_ : Union[str, Any] = resid_pdrop
UpperCAmelCase_ : Any = embd_pdrop
UpperCAmelCase_ : int = layer_norm_epsilon
UpperCAmelCase_ : List[Any] = initializer_range
UpperCAmelCase_ : Any = use_cache
super().__init__(**_UpperCamelCase )
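# Standalone sketch (added) of the aliasing that the `attribute_map` above
# provides through PretrainedConfig; the class and names below are illustrative
# and not part of the original file.
class _AliasedConfig:
    attribute_map = {'hidden_size': 'n_embd', 'num_hidden_layers': 'n_layer'}

    def __init__(self, n_embd=1280, n_layer=48):
        self.n_embd = n_embd
        self.n_layer = n_layer

    def __getattr__(self, name):
        # only reached when normal lookup fails, so aliases resolve lazily
        if name in self.attribute_map:
            return getattr(self, self.attribute_map[name])
        raise AttributeError(name)


assert _AliasedConfig().hidden_size == 1280
assert _AliasedConfig().num_hidden_layers == 48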
| 29 |
__UpperCAmelCase = {
'Pillow': 'Pillow<10.0.0',
'accelerate': 'accelerate>=0.20.3',
'av': 'av==9.2.0',
'beautifulsoup4': 'beautifulsoup4',
'black': 'black~=23.1',
'codecarbon': 'codecarbon==1.2.0',
'cookiecutter': 'cookiecutter==1.7.3',
'dataclasses': 'dataclasses',
'datasets': 'datasets!=2.5.0',
'decord': 'decord==0.6.0',
'deepspeed': 'deepspeed>=0.9.3',
'diffusers': 'diffusers',
'dill': 'dill<0.3.5',
'evaluate': 'evaluate>=0.2.0',
'fairscale': 'fairscale>0.3',
'faiss-cpu': 'faiss-cpu',
'fastapi': 'fastapi',
'filelock': 'filelock',
'flax': 'flax>=0.4.1,<=0.7.0',
'ftfy': 'ftfy',
'fugashi': 'fugashi>=1.0',
'GitPython': 'GitPython<3.1.19',
'hf-doc-builder': 'hf-doc-builder>=0.3.0',
'huggingface-hub': 'huggingface-hub>=0.14.1,<1.0',
'importlib_metadata': 'importlib_metadata',
'ipadic': 'ipadic>=1.0.0,<2.0',
'isort': 'isort>=5.5.4',
'jax': 'jax>=0.2.8,!=0.3.2,<=0.4.13',
'jaxlib': 'jaxlib>=0.1.65,<=0.4.13',
'jieba': 'jieba',
'kenlm': 'kenlm',
'keras-nlp': 'keras-nlp>=0.3.1',
'librosa': 'librosa',
'nltk': 'nltk',
'natten': 'natten>=0.14.6',
'numpy': 'numpy>=1.17',
'onnxconverter-common': 'onnxconverter-common',
'onnxruntime-tools': 'onnxruntime-tools>=1.4.2',
'onnxruntime': 'onnxruntime>=1.4.0',
'opencv-python': 'opencv-python',
'optuna': 'optuna',
'optax': 'optax>=0.0.8,<=0.1.4',
'packaging': 'packaging>=20.0',
'parameterized': 'parameterized',
'phonemizer': 'phonemizer',
'protobuf': 'protobuf',
'psutil': 'psutil',
'pyyaml': 'pyyaml>=5.1',
'pydantic': 'pydantic<2',
'pytest': 'pytest>=7.2.0',
'pytest-timeout': 'pytest-timeout',
'pytest-xdist': 'pytest-xdist',
'python': 'python>=3.8.0',
'ray[tune]': 'ray[tune]',
'regex': 'regex!=2019.12.17',
'requests': 'requests',
'rhoknp': 'rhoknp>=1.1.0,<1.3.1',
'rjieba': 'rjieba',
'rouge-score': 'rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1',
'ruff': 'ruff>=0.0.241,<=0.0.259',
'sacrebleu': 'sacrebleu>=1.4.12,<2.0.0',
'sacremoses': 'sacremoses',
'safetensors': 'safetensors>=0.3.1',
'sagemaker': 'sagemaker>=2.31.0',
'scikit-learn': 'scikit-learn',
'sentencepiece': 'sentencepiece>=0.1.91,!=0.1.92',
'sigopt': 'sigopt',
'starlette': 'starlette',
'sudachipy': 'sudachipy>=0.6.6',
'sudachidict_core': 'sudachidict_core>=20220729',
'tensorflow-cpu': 'tensorflow-cpu>=2.6,<2.14',
'tensorflow': 'tensorflow>=2.6,<2.14',
'tensorflow-text': 'tensorflow-text<2.14',
'tf2onnx': 'tf2onnx',
'timeout-decorator': 'timeout-decorator',
'timm': 'timm',
'tokenizers': 'tokenizers>=0.11.1,!=0.11.3,<0.14',
'torch': 'torch>=1.9,!=1.12.0',
'torchaudio': 'torchaudio',
'torchvision': 'torchvision',
'pyctcdecode': 'pyctcdecode>=0.4.0',
'tqdm': 'tqdm>=4.27',
'unidic': 'unidic>=1.0.2',
'unidic_lite': 'unidic_lite>=1.0.7',
'urllib3': 'urllib3<2.0.0',
'uvicorn': 'uvicorn',
}
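# Standalone sketch (added): pins like the ones above can be checked with
# `packaging`, which this table itself lists; e.g. validating a torch version
# against its specifier.
from packaging.requirements import Requirement

_req = Requirement('torch>=1.9,!=1.12.0')
assert '1.13.1' in _req.specifier      # satisfies the pin
assert '1.12.0' not in _req.specifier  # explicitly excluded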
| 29 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
'uclanlp/visualbert-vqa': 'https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json',
'uclanlp/visualbert-vqa-pre': 'https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json',
'uclanlp/visualbert-vqa-coco-pre': (
'https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json'
),
'uclanlp/visualbert-vcr': 'https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json',
'uclanlp/visualbert-vcr-pre': 'https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json',
'uclanlp/visualbert-vcr-coco-pre': (
'https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json'
),
'uclanlp/visualbert-nlvr2': 'https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json',
'uclanlp/visualbert-nlvr2-pre': 'https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json',
'uclanlp/visualbert-nlvr2-coco-pre': (
'https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json'
)
# See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class lowerCamelCase (_snake_case ):
'''simple docstring'''
_snake_case : str = '''visual_bert'''
def __init__( self , _UpperCamelCase=3_0_5_2_2 , _UpperCamelCase=7_6_8 , _UpperCamelCase=5_1_2 , _UpperCamelCase=1_2 , _UpperCamelCase=1_2 , _UpperCamelCase=3_0_7_2 , _UpperCamelCase="gelu" , _UpperCamelCase=0.1 , _UpperCamelCase=0.1 , _UpperCamelCase=5_1_2 , _UpperCamelCase=2 , _UpperCamelCase=0.02 , _UpperCamelCase=1E-12 , _UpperCamelCase=False , _UpperCamelCase=True , _UpperCamelCase=1 , _UpperCamelCase=0 , _UpperCamelCase=2 , **_UpperCamelCase , ) -> List[Any]:
super().__init__(pad_token_id=_UpperCamelCase , bos_token_id=_UpperCamelCase , eos_token_id=_UpperCamelCase , **_UpperCamelCase )
UpperCAmelCase_ : Any = vocab_size
UpperCAmelCase_ : List[Any] = max_position_embeddings
UpperCAmelCase_ : Dict = hidden_size
UpperCAmelCase_ : Any = visual_embedding_dim
UpperCAmelCase_ : Any = num_hidden_layers
UpperCAmelCase_ : str = num_attention_heads
UpperCAmelCase_ : int = intermediate_size
UpperCAmelCase_ : Tuple = hidden_act
UpperCAmelCase_ : Any = hidden_dropout_prob
UpperCAmelCase_ : Optional[Any] = attention_probs_dropout_prob
UpperCAmelCase_ : Any = initializer_range
UpperCAmelCase_ : Union[str, Any] = type_vocab_size
UpperCAmelCase_ : List[Any] = layer_norm_eps
UpperCAmelCase_ : Any = bypass_transformer
UpperCAmelCase_ : Union[str, Any] = special_visual_initialize
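# Hedged standalone sketch of the config round-trip such classes rely on;
# `TinyConfig` is hypothetical, only `PretrainedConfig` itself is real API.
from transformers import PretrainedConfig

class TinyConfig(PretrainedConfig):
    model_type = 'tiny'

    def __init__(self, hidden_size=768, visual_embedding_dim=512, **kwargs):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.visual_embedding_dim = visual_embedding_dim

cfg = TinyConfig(visual_embedding_dim=1024)
assert TinyConfig.from_dict(cfg.to_dict()).visual_embedding_dim == 1024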
| 29 |
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .attention_processor import AttentionProcessor, AttnProcessor
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder
@dataclass
class lowerCamelCase (_snake_case ):
'''simple docstring'''
_snake_case : "DiagonalGaussianDistribution"
class lowerCamelCase (_snake_case , _snake_case ):
'''simple docstring'''
_snake_case : Optional[int] = True
@register_to_config
def __init__( self , _UpperCamelCase = 3 , _UpperCamelCase = 3 , _UpperCamelCase = ("DownEncoderBlock2D",) , _UpperCamelCase = ("UpDecoderBlock2D",) , _UpperCamelCase = (6_4,) , _UpperCamelCase = 1 , _UpperCamelCase = "silu" , _UpperCamelCase = 4 , _UpperCamelCase = 3_2 , _UpperCamelCase = 3_2 , _UpperCamelCase = 0.1_82_15 , ) -> List[Any]:
super().__init__()
# pass init params to Encoder
UpperCAmelCase_ : List[str] = Encoder(
in_channels=_UpperCamelCase , out_channels=_UpperCamelCase , down_block_types=_UpperCamelCase , block_out_channels=_UpperCamelCase , layers_per_block=_UpperCamelCase , act_fn=_UpperCamelCase , norm_num_groups=_UpperCamelCase , double_z=_UpperCamelCase , )
# pass init params to Decoder
UpperCAmelCase_ : Dict = Decoder(
in_channels=_UpperCamelCase , out_channels=_UpperCamelCase , up_block_types=_UpperCamelCase , block_out_channels=_UpperCamelCase , layers_per_block=_UpperCamelCase , norm_num_groups=_UpperCamelCase , act_fn=_UpperCamelCase , )
UpperCAmelCase_ : Any = nn.Conv2d(2 * latent_channels , 2 * latent_channels , 1 )
UpperCAmelCase_ : List[Any] = nn.Conv2d(_UpperCamelCase , _UpperCamelCase , 1 )
UpperCAmelCase_ : Any = False
UpperCAmelCase_ : int = False
# only relevant if vae tiling is enabled
UpperCAmelCase_ : Optional[int] = self.config.sample_size
UpperCAmelCase_ : int = (
self.config.sample_size[0]
if isinstance(self.config.sample_size , (list, tuple) )
else self.config.sample_size
)
UpperCAmelCase_ : Union[str, Any] = int(sample_size / (2 ** (len(self.config.block_out_channels ) - 1)) )
UpperCAmelCase_ : Optional[Any] = 0.25
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase=False ) -> List[str]:
if isinstance(_UpperCamelCase , (Encoder, Decoder) ):
UpperCAmelCase_ : Union[str, Any] = value
def __UpperCAmelCase ( self , _UpperCamelCase = True ) -> int:
UpperCAmelCase_ : Tuple = use_tiling
def __UpperCAmelCase ( self ) -> Dict:
self.enable_tiling(_UpperCamelCase )
def __UpperCAmelCase ( self ) -> Optional[Any]:
UpperCAmelCase_ : str = True
def __UpperCAmelCase ( self ) -> List[Any]:
UpperCAmelCase_ : Optional[int] = False
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def __UpperCAmelCase ( self ) -> Dict[str, AttentionProcessor]:
UpperCAmelCase_ : Optional[int] = {}
def fn_recursive_add_processors(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
if hasattr(_UpperCamelCase , 'set_processor' ):
UpperCAmelCase_ : Optional[int] = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(f"{name}.{sub_name}" , _UpperCamelCase , _UpperCamelCase )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
return processors
def __UpperCAmelCase ( self , _UpperCamelCase ) -> List[Any]:
UpperCAmelCase_ : Union[str, Any] = len(self.attn_processors.keys() )
if isinstance(_UpperCamelCase , _UpperCamelCase ) and len(_UpperCamelCase ) != count:
raise ValueError(
f"A dict of processors was passed, but the number of processors {len(_UpperCamelCase )} does not match the"
f" number of attention layers: {count}. Please make sure to pass {count} processor classes." )
def fn_recursive_attn_processor(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
if hasattr(_UpperCamelCase , 'set_processor' ):
if not isinstance(_UpperCamelCase , _UpperCamelCase ):
module.set_processor(_UpperCamelCase )
else:
module.set_processor(processor.pop(f"{name}.processor" ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(f"{name}.{sub_name}" , _UpperCamelCase , _UpperCamelCase )
for name, module in self.named_children():
fn_recursive_attn_processor(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
def __UpperCAmelCase ( self ) -> Union[str, Any]:
self.set_attn_processor(AttnProcessor() )
@apply_forward_hook
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = True ) -> AutoencoderKLOutput:
if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
return self.tiled_encode(_UpperCamelCase , return_dict=_UpperCamelCase )
if self.use_slicing and x.shape[0] > 1:
UpperCAmelCase_ : Union[str, Any] = [self.encoder(x_slice ) for x_slice in x.split(1 )]
UpperCAmelCase_ : Tuple = torch.cat(_UpperCamelCase )
else:
UpperCAmelCase_ : List[Any] = self.encoder(_UpperCamelCase )
UpperCAmelCase_ : Optional[Any] = self.quant_conv(_UpperCamelCase )
UpperCAmelCase_ : Tuple = DiagonalGaussianDistribution(_UpperCamelCase )
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=_UpperCamelCase )
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = True ) -> Union[DecoderOutput, torch.FloatTensor]:
if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
return self.tiled_decode(_UpperCamelCase , return_dict=_UpperCamelCase )
UpperCAmelCase_ : str = self.post_quant_conv(_UpperCamelCase )
UpperCAmelCase_ : List[str] = self.decoder(_UpperCamelCase )
if not return_dict:
return (dec,)
return DecoderOutput(sample=_UpperCamelCase )
@apply_forward_hook
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = True ) -> Union[DecoderOutput, torch.FloatTensor]:
if self.use_slicing and z.shape[0] > 1:
UpperCAmelCase_ : List[str] = [self._decode(z_slice ).sample for z_slice in z.split(1 )]
UpperCAmelCase_ : Dict = torch.cat(_UpperCamelCase )
else:
UpperCAmelCase_ : Any = self._decode(_UpperCamelCase ).sample
if not return_dict:
return (decoded,)
return DecoderOutput(sample=_UpperCamelCase )
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Any:
UpperCAmelCase_ : Tuple = min(a.shape[2] , b.shape[2] , _UpperCamelCase )
for y in range(_UpperCamelCase ):
UpperCAmelCase_ : str = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
return b
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Dict:
UpperCAmelCase_ : Tuple = min(a.shape[3] , b.shape[3] , _UpperCamelCase )
for x in range(_UpperCamelCase ):
UpperCAmelCase_ : int = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
return b
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = True ) -> AutoencoderKLOutput:
UpperCAmelCase_ : Any = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor) )
UpperCAmelCase_ : Tuple = int(self.tile_latent_min_size * self.tile_overlap_factor )
UpperCAmelCase_ : Optional[int] = self.tile_latent_min_size - blend_extent
# Split the image into 512x512 tiles and encode them separately.
UpperCAmelCase_ : List[str] = []
for i in range(0 , x.shape[2] , _UpperCamelCase ):
UpperCAmelCase_ : Any = []
for j in range(0 , x.shape[3] , _UpperCamelCase ):
UpperCAmelCase_ : Any = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
UpperCAmelCase_ : Dict = self.encoder(_UpperCamelCase )
UpperCAmelCase_ : List[str] = self.quant_conv(_UpperCamelCase )
row.append(_UpperCamelCase )
rows.append(_UpperCamelCase )
UpperCAmelCase_ : str = []
for i, row in enumerate(_UpperCamelCase ):
UpperCAmelCase_ : List[Any] = []
for j, tile in enumerate(_UpperCamelCase ):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
UpperCAmelCase_ : Dict = self.blend_v(rows[i - 1][j] , _UpperCamelCase , _UpperCamelCase )
if j > 0:
UpperCAmelCase_ : List[str] = self.blend_h(row[j - 1] , _UpperCamelCase , _UpperCamelCase )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(_UpperCamelCase , dim=3 ) )
UpperCAmelCase_ : Union[str, Any] = torch.cat(_UpperCamelCase , dim=2 )
UpperCAmelCase_ : List[Any] = DiagonalGaussianDistribution(_UpperCamelCase )
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=_UpperCamelCase )
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = True ) -> Union[DecoderOutput, torch.FloatTensor]:
UpperCAmelCase_ : str = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor) )
UpperCAmelCase_ : Dict = int(self.tile_sample_min_size * self.tile_overlap_factor )
UpperCAmelCase_ : Dict = self.tile_sample_min_size - blend_extent
# Split z into overlapping 64x64 tiles and decode them separately.
# The tiles have an overlap to avoid seams between tiles.
UpperCAmelCase_ : Union[str, Any] = []
for i in range(0 , z.shape[2] , _UpperCamelCase ):
UpperCAmelCase_ : List[str] = []
for j in range(0 , z.shape[3] , _UpperCamelCase ):
UpperCAmelCase_ : List[str] = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
UpperCAmelCase_ : Optional[Any] = self.post_quant_conv(_UpperCamelCase )
UpperCAmelCase_ : Tuple = self.decoder(_UpperCamelCase )
row.append(_UpperCamelCase )
rows.append(_UpperCamelCase )
UpperCAmelCase_ : Optional[Any] = []
for i, row in enumerate(_UpperCamelCase ):
UpperCAmelCase_ : List[Any] = []
for j, tile in enumerate(_UpperCamelCase ):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
UpperCAmelCase_ : Union[str, Any] = self.blend_v(rows[i - 1][j] , _UpperCamelCase , _UpperCamelCase )
if j > 0:
UpperCAmelCase_ : Optional[Any] = self.blend_h(row[j - 1] , _UpperCamelCase , _UpperCamelCase )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(_UpperCamelCase , dim=3 ) )
UpperCAmelCase_ : Dict = torch.cat(_UpperCamelCase , dim=2 )
if not return_dict:
return (dec,)
return DecoderOutput(sample=_UpperCamelCase )
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = False , _UpperCamelCase = True , _UpperCamelCase = None , ) -> Union[DecoderOutput, torch.FloatTensor]:
UpperCAmelCase_ : Optional[Any] = sample
UpperCAmelCase_ : Union[str, Any] = self.encode(_UpperCamelCase ).latent_dist
if sample_posterior:
UpperCAmelCase_ : str = posterior.sample(generator=_UpperCamelCase )
else:
UpperCAmelCase_ : int = posterior.mode()
UpperCAmelCase_ : Dict = self.decode(_UpperCamelCase ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=_UpperCamelCase )
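# Standalone sketch of the vertical blend used by blend_v above: a linear
# cross-fade over `blend_extent` rows so adjacent tiles meet without a seam.
import torch

def blend_rows(a: torch.Tensor, b: torch.Tensor, blend_extent: int) -> torch.Tensor:
    blend_extent = min(a.shape[2], b.shape[2], blend_extent)
    for y in range(blend_extent):
        w = y / blend_extent
        b[:, :, y, :] = a[:, :, -blend_extent + y, :] * (1 - w) + b[:, :, y, :] * w
    return b

top = torch.zeros(1, 3, 8, 8)
bottom = torch.ones(1, 3, 8, 8)
blended = blend_rows(top, bottom, blend_extent=4)
print(blended[0, 0, :4, 0])  # tensor([0.0000, 0.2500, 0.5000, 0.7500])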
| 29 | 1 |
import argparse
from copy import deepcopy
import numpy as np
from datasets import ClassLabel, DatasetDict, load_dataset
from evaluate import load
from transformers import (
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
Trainer,
TrainerCallback,
TrainingArguments,
set_seed,
)
def lowercase__ ( ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = argparse.ArgumentParser()
parser.add_argument('--model_ckpt' , type=__snake_case , default='microsoft/unixcoder-base-nine' )
parser.add_argument('--num_epochs' , type=__snake_case , default=5 )
parser.add_argument('--batch_size' , type=__snake_case , default=6 )
parser.add_argument('--gradient_accumulation_steps' , type=__snake_case , default=1 )
parser.add_argument('--freeze' , type=__snake_case , default=__snake_case )
parser.add_argument('--learning_rate' , type=__snake_case , default=5E-4 )
parser.add_argument('--seed' , type=__snake_case , default=0 )
parser.add_argument('--lr_scheduler_type' , type=__snake_case , default='cosine' )
parser.add_argument('--num_warmup_steps' , type=__snake_case , default=10 )
parser.add_argument('--weight_decay' , type=__snake_case , default=0.01 )
parser.add_argument('--output_dir' , type=__snake_case , default='./results' )
return parser.parse_args()
__UpperCAmelCase = load('accuracy')
def lowercase__ ( __snake_case : int ):
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ : int = eval_pred
UpperCAmelCase_ : int = np.argmax(__snake_case , axis=1 )
return metric.compute(predictions=__snake_case , references=__snake_case )
class lowerCamelCase (_snake_case ):
'''simple docstring'''
def __init__( self , _UpperCamelCase ) -> None:
super().__init__()
UpperCAmelCase_ : Union[str, Any] = trainer
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , **_UpperCamelCase ) -> str:
if control.should_evaluate:
UpperCAmelCase_ : Any = deepcopy(_UpperCamelCase )
self._trainer.evaluate(eval_dataset=self._trainer.train_dataset , metric_key_prefix='train' )
return control_copy
def lowercase__ ( ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = get_args()
set_seed(args.seed )
UpperCAmelCase_ : List[str] = load_dataset('codeparrot/codecomplex' , split='train' )
UpperCAmelCase_ : Optional[int] = dataset.train_test_split(test_size=0.2 )
UpperCAmelCase_ : Union[str, Any] = train_test['test'].train_test_split(test_size=0.5 )
UpperCAmelCase_ : List[str] = DatasetDict(
{
'train': train_test['train'],
'test': test_validation['train'],
'valid': test_validation['test'],
} )
print('Loading tokenizer and model' )
UpperCAmelCase_ : Dict = AutoTokenizer.from_pretrained(args.model_ckpt )
UpperCAmelCase_ : List[Any] = tokenizer.eos_token
UpperCAmelCase_ : Optional[Any] = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt , num_labels=7 )
UpperCAmelCase_ : int = model.config.eos_token_id
if args.freeze:
for param in model.roberta.parameters():
UpperCAmelCase_ : Dict = False
UpperCAmelCase_ : Tuple = ClassLabel(num_classes=7 , names=list(set(train_test_validation['train']['complexity'] ) ) )
def tokenize(__snake_case : str ):
UpperCAmelCase_ : str = tokenizer(example['src'] , truncation=__snake_case , max_length=1_024 )
UpperCAmelCase_ : Dict = labels.str2int(example['complexity'] )
return {
"input_ids": inputs["input_ids"],
"attention_mask": inputs["attention_mask"],
"label": label,
}
UpperCAmelCase_ : List[Any] = train_test_validation.map(
__snake_case , batched=__snake_case , remove_columns=train_test_validation['train'].column_names , )
UpperCAmelCase_ : Tuple = DataCollatorWithPadding(tokenizer=__snake_case )
UpperCAmelCase_ : Optional[Any] = TrainingArguments(
output_dir=args.output_dir , learning_rate=args.learning_rate , lr_scheduler_type=args.lr_scheduler_type , evaluation_strategy='epoch' , save_strategy='epoch' , logging_strategy='epoch' , per_device_train_batch_size=args.batch_size , per_device_eval_batch_size=args.batch_size , num_train_epochs=args.num_epochs , gradient_accumulation_steps=args.gradient_accumulation_steps , weight_decay=0.01 , metric_for_best_model='accuracy' , run_name='complexity-java' , report_to='wandb' , )
UpperCAmelCase_ : str = Trainer(
model=__snake_case , args=__snake_case , train_dataset=tokenized_datasets['train'] , eval_dataset=tokenized_datasets['valid'] , tokenizer=__snake_case , data_collator=__snake_case , compute_metrics=__snake_case , )
print('Training...' )
trainer.add_callback(CustomCallback(__snake_case ) )
trainer.train()
if __name__ == "__main__":
main()
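# Hedged standalone sketch of the label handling inside tokenize() above:
# datasets.ClassLabel maps string labels to integer ids via str2int.
from datasets import ClassLabel

complexity_labels = ClassLabel(names=['constant', 'linear', 'quadratic'])
assert complexity_labels.str2int('linear') == 1
assert complexity_labels.int2str(2) == 'quadratic'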
| 29 |
def lowercase__ ( __snake_case : int , __snake_case : int ):
'''simple docstring'''
if a < 0 or b < 0:
raise ValueError('the value of both inputs must be positive' )
UpperCAmelCase_ : Tuple = str(bin(__snake_case ) )[2:] # remove the leading "0b"
UpperCAmelCase_ : Union[str, Any] = str(bin(__snake_case ) )[2:] # remove the leading "0b"
UpperCAmelCase_ : List[Any] = max(len(__snake_case ) , len(__snake_case ) )
return "0b" + "".join(
str(int(char_a == '1' and char_b == '1' ) )
for char_a, char_b in zip(a_binary.zfill(__snake_case ) , b_binary.zfill(__snake_case ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
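    # Hedged quick demo of the function above (named `lowercase__` in this dump):
    # 37 = 0b100101 and 50 = 0b110010 share only the highest bit.
    print(lowercase__(37, 50))  # 0b100000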
| 29 | 1 |
import math
import sys
def lowercase__ ( __snake_case : str ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = ''
try:
with open(__snake_case , 'rb' ) as binary_file:
UpperCAmelCase_ : str = binary_file.read()
for dat in data:
UpperCAmelCase_ : List[str] = F"{dat:08b}"
result += curr_byte
return result
except OSError:
print('File not accessible' )
sys.exit()
def lowercase__ ( __snake_case : str ):
'''simple docstring'''
UpperCAmelCase_ : int = {'0': '0', '1': '1'}
UpperCAmelCase_ , UpperCAmelCase_ : int = '', ''
UpperCAmelCase_ : Optional[Any] = len(lexicon )
for i in range(len(__snake_case ) ):
curr_string += data_bits[i]
if curr_string not in lexicon:
continue
UpperCAmelCase_ : str = lexicon[curr_string]
result += last_match_id
UpperCAmelCase_ : Tuple = last_match_id + '0'
if math.log2(index ).is_integer():
UpperCAmelCase_ : List[Any] = {}
for curr_key in list(__snake_case ):
UpperCAmelCase_ : Optional[int] = lexicon.pop(__snake_case )
UpperCAmelCase_ : Optional[Any] = new_lex
UpperCAmelCase_ : Tuple = last_match_id + '1'
index += 1
UpperCAmelCase_ : Tuple = ''
return result
def lowercase__ ( __snake_case : str , __snake_case : str ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = 8
try:
with open(__snake_case , 'wb' ) as opened_file:
UpperCAmelCase_ : Dict = [
to_write[i : i + byte_length]
for i in range(0 , len(__snake_case ) , __snake_case )
]
if len(result_byte_array[-1] ) % byte_length == 0:
result_byte_array.append('10000000' )
else:
result_byte_array[-1] += "1" + "0" * (
byte_length - len(result_byte_array[-1] ) - 1
)
for elem in result_byte_array[:-1]:
opened_file.write(int(__snake_case , 2 ).to_bytes(1 , byteorder='big' ) )
except OSError:
print('File not accessible' )
sys.exit()
def lowercase__ ( __snake_case : str ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = 0
for letter in data_bits:
if letter == "1":
break
counter += 1
UpperCAmelCase_ : List[str] = data_bits[counter:]
UpperCAmelCase_ : Union[str, Any] = data_bits[counter + 1 :]
return data_bits
def lowercase__ ( __snake_case : str , __snake_case : str ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = read_file_binary(__snake_case )
UpperCAmelCase_ : List[str] = remove_prefix(__snake_case )
UpperCAmelCase_ : Union[str, Any] = decompress_data(__snake_case )
write_file_binary(__snake_case , __snake_case )
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
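# Side note on the lexicon-growth test above: math.log2(n).is_integer() is a
# power-of-two check, so the rebucketing fires exactly when the index crosses
# 1, 2, 4, 8, ... A tiny standalone illustration:
import math
print([n for n in range(1, 17) if math.log2(n).is_integer()])  # [1, 2, 4, 8, 16]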
| 29 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {'vocab_file': 'vocab.txt'}
__UpperCAmelCase = {
'vocab_file': {
'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt',
'YituTech/conv-bert-medium-small': (
'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'
),
'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt',
}
}
__UpperCAmelCase = {
'YituTech/conv-bert-base': 512,
'YituTech/conv-bert-medium-small': 512,
'YituTech/conv-bert-small': 512,
}
__UpperCAmelCase = {
'YituTech/conv-bert-base': {'do_lower_case': True},
'YituTech/conv-bert-medium-small': {'do_lower_case': True},
'YituTech/conv-bert-small': {'do_lower_case': True},
}
class lowerCamelCase (_snake_case ):
'''simple docstring'''
_snake_case : Optional[int] = VOCAB_FILES_NAMES
_snake_case : int = PRETRAINED_VOCAB_FILES_MAP
_snake_case : Dict = PRETRAINED_INIT_CONFIGURATION
_snake_case : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_snake_case : Any = ConvBertTokenizer
def __init__( self , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase=True , _UpperCamelCase="[UNK]" , _UpperCamelCase="[SEP]" , _UpperCamelCase="[PAD]" , _UpperCamelCase="[CLS]" , _UpperCamelCase="[MASK]" , _UpperCamelCase=True , _UpperCamelCase=None , **_UpperCamelCase , ) -> Dict:
super().__init__(
_UpperCamelCase , tokenizer_file=_UpperCamelCase , do_lower_case=_UpperCamelCase , unk_token=_UpperCamelCase , sep_token=_UpperCamelCase , pad_token=_UpperCamelCase , cls_token=_UpperCamelCase , mask_token=_UpperCamelCase , tokenize_chinese_chars=_UpperCamelCase , strip_accents=_UpperCamelCase , **_UpperCamelCase , )
UpperCAmelCase_ : Optional[int] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , _UpperCamelCase ) != do_lower_case
or normalizer_state.get('strip_accents' , _UpperCamelCase ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , _UpperCamelCase ) != tokenize_chinese_chars
):
UpperCAmelCase_ : Any = getattr(_UpperCamelCase , normalizer_state.pop('type' ) )
UpperCAmelCase_ : str = do_lower_case
UpperCAmelCase_ : List[Any] = strip_accents
UpperCAmelCase_ : str = tokenize_chinese_chars
UpperCAmelCase_ : Tuple = normalizer_class(**_UpperCamelCase )
UpperCAmelCase_ : Any = do_lower_case
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase=None ) -> List[str]:
UpperCAmelCase_ : int = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
if token_ids_1:
output += token_ids_1 + [self.sep_token_id]
return output
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = None ) -> List[int]:
UpperCAmelCase_ : Union[str, Any] = [self.sep_token_id]
UpperCAmelCase_ : int = [self.cls_token_id]
if token_ids_1 is None:
return len(cls + token_ids_0 + sep ) * [0]
return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = None ) -> Tuple[str]:
UpperCAmelCase_ : Any = self._tokenizer.model.save(_UpperCamelCase , name=_UpperCamelCase )
return tuple(_UpperCamelCase )
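# Standalone sketch of the token_type_ids layout built above for a sequence
# pair: zeros cover "[CLS] A [SEP]", ones cover "B [SEP]".
def pair_type_ids(len_a: int, len_b: int) -> list:
    return [0] * (1 + len_a + 1) + [1] * (len_b + 1)

print(pair_type_ids(3, 2))  # [0, 0, 0, 0, 0, 1, 1, 1]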
| 29 | 1 |
from __future__ import annotations
from collections.abc import Callable
from typing import Any, Generic, TypeVar
__UpperCAmelCase = TypeVar('T')
class lowerCamelCase (Generic[T] ):
'''simple docstring'''
def __init__( self , _UpperCamelCase , _UpperCamelCase ) -> None:
UpperCAmelCase_ : Any | T = None
UpperCAmelCase_ : int = len(_UpperCamelCase )
UpperCAmelCase_ : list[T] = [any_type for _ in range(self.N )] + arr
UpperCAmelCase_ : Optional[int] = fnc
self.build()
def __UpperCAmelCase ( self ) -> None:
for p in range(self.N - 1 , 0 , -1 ):
UpperCAmelCase_ : str = self.fn(self.st[p * 2] , self.st[p * 2 + 1] )
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase ) -> None:
p += self.N
UpperCAmelCase_ : List[str] = v
while p > 1:
UpperCAmelCase_ : Optional[Any] = p // 2
UpperCAmelCase_ : Union[str, Any] = self.fn(self.st[p * 2] , self.st[p * 2 + 1] )
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase ) -> T | None: # noqa: E741
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = l + self.N, r + self.N
UpperCAmelCase_ : T | None = None
while l <= r:
if l % 2 == 1:
UpperCAmelCase_ : List[str] = self.st[l] if res is None else self.fn(_UpperCamelCase , self.st[l] )
if r % 2 == 0:
UpperCAmelCase_ : Dict = self.st[r] if res is None else self.fn(_UpperCamelCase , self.st[r] )
UpperCAmelCase_ , UpperCAmelCase_ : str = (l + 1) // 2, (r - 1) // 2
return res
if __name__ == "__main__":
from functools import reduce
__UpperCAmelCase = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12]
__UpperCAmelCase = {
0: 7,
1: 2,
2: 6,
3: -14,
4: 5,
5: 4,
6: 7,
7: -10,
8: 9,
9: 10,
10: 12,
11: 1,
}
__UpperCAmelCase = SegmentTree(test_array, min)
__UpperCAmelCase = SegmentTree(test_array, max)
__UpperCAmelCase = SegmentTree(test_array, lambda a, b: a + b)
def lowercase__ ( ):
'''simple docstring'''
for i in range(len(__snake_case ) ):
for j in range(__snake_case , len(__snake_case ) ):
UpperCAmelCase_ : Dict = reduce(__snake_case , test_array[i : j + 1] )
UpperCAmelCase_ : Dict = reduce(__snake_case , test_array[i : j + 1] )
UpperCAmelCase_ : str = reduce(lambda __snake_case , __snake_case : a + b , test_array[i : j + 1] )
assert min_range == min_segment_tree.query(__snake_case , __snake_case )
assert max_range == max_segment_tree.query(__snake_case , __snake_case )
assert sum_range == sum_segment_tree.query(__snake_case , __snake_case )
test_all_segments()
for index, value in test_updates.items():
__UpperCAmelCase = value
min_segment_tree.update(index, value)
max_segment_tree.update(index, value)
sum_segment_tree.update(index, value)
test_all_segments()
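    # One-off range query on top of the checks above: after the updates,
    # indices 2..5 hold [6, -14, 5, 4], so the range minimum is -14.
    print(min_segment_tree.query(2, 5))  # -14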
| 29 |
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
'snap-research/efficientformer-l1-300': (
'https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json'
),
}
class lowerCamelCase (_snake_case ):
'''simple docstring'''
_snake_case : Optional[int] = '''efficientformer'''
def __init__( self , _UpperCamelCase = [3, 2, 6, 4] , _UpperCamelCase = [4_8, 9_6, 2_2_4, 4_4_8] , _UpperCamelCase = [True, True, True, True] , _UpperCamelCase = 4_4_8 , _UpperCamelCase = 3_2 , _UpperCamelCase = 4 , _UpperCamelCase = 7 , _UpperCamelCase = 5 , _UpperCamelCase = 8 , _UpperCamelCase = 4 , _UpperCamelCase = 0.0 , _UpperCamelCase = 1_6 , _UpperCamelCase = 3 , _UpperCamelCase = 3 , _UpperCamelCase = 3 , _UpperCamelCase = 2 , _UpperCamelCase = 1 , _UpperCamelCase = 0.0 , _UpperCamelCase = 1 , _UpperCamelCase = True , _UpperCamelCase = True , _UpperCamelCase = 1E-5 , _UpperCamelCase = "gelu" , _UpperCamelCase = 0.02 , _UpperCamelCase = 1E-12 , _UpperCamelCase = 2_2_4 , _UpperCamelCase = 1E-05 , **_UpperCamelCase , ) -> None:
super().__init__(**_UpperCamelCase )
UpperCAmelCase_ : int = hidden_act
UpperCAmelCase_ : Union[str, Any] = hidden_dropout_prob
UpperCAmelCase_ : Tuple = hidden_sizes
UpperCAmelCase_ : Union[str, Any] = num_hidden_layers
UpperCAmelCase_ : List[str] = num_attention_heads
UpperCAmelCase_ : List[Any] = initializer_range
UpperCAmelCase_ : int = layer_norm_eps
UpperCAmelCase_ : List[str] = patch_size
UpperCAmelCase_ : Union[str, Any] = num_channels
UpperCAmelCase_ : Optional[Any] = depths
UpperCAmelCase_ : List[Any] = mlp_expansion_ratio
UpperCAmelCase_ : List[str] = downsamples
UpperCAmelCase_ : List[Any] = dim
UpperCAmelCase_ : Tuple = key_dim
UpperCAmelCase_ : Optional[int] = attention_ratio
UpperCAmelCase_ : str = resolution
UpperCAmelCase_ : Dict = pool_size
UpperCAmelCase_ : Union[str, Any] = downsample_patch_size
UpperCAmelCase_ : List[str] = downsample_stride
UpperCAmelCase_ : List[str] = downsample_pad
UpperCAmelCase_ : Any = drop_path_rate
UpperCAmelCase_ : Dict = num_meta3d_blocks
UpperCAmelCase_ : Dict = distillation
UpperCAmelCase_ : int = use_layer_scale
UpperCAmelCase_ : Any = layer_scale_init_value
UpperCAmelCase_ : Any = image_size
UpperCAmelCase_ : Dict = batch_norm_eps
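# Standalone sanity-check sketch: in stage-based configs like the one above,
# depths, hidden_sizes and downsamples all describe the same four stages,
# so their lengths must agree (values copied from the defaults above).
depths = [3, 2, 6, 4]
hidden_sizes = [48, 96, 224, 448]
downsamples = [True, True, True, True]
assert len(depths) == len(hidden_sizes) == len(downsamples)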
| 29 | 1 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
__UpperCAmelCase = [
'EAGER',
'AOT_EAGER',
'INDUCTOR',
'NVFUSER',
'AOT_NVFUSER',
'AOT_CUDAGRAPHS',
'OFI',
'FX2TRT',
'ONNXRT',
'IPEX',
]
def lowercase__ ( __snake_case : str , __snake_case : int=None , __snake_case : List[str]=None , __snake_case : Tuple=None ):
'''simple docstring'''
UpperCAmelCase_ : int = True
while ask_again:
UpperCAmelCase_ : Any = input(__snake_case )
try:
if default is not None and len(__snake_case ) == 0:
return default
return convert_value(__snake_case ) if convert_value is not None else result
except Exception:
if error_message is not None:
print(__snake_case )
def lowercase__ ( __snake_case : Tuple , __snake_case : Tuple=[] , __snake_case : List[str]=None , __snake_case : List[str]=0 ):
'''simple docstring'''
UpperCAmelCase_ : str = BulletMenu(__snake_case , __snake_case )
UpperCAmelCase_ : Union[str, Any] = menu.run(default_choice=__snake_case )
return convert_value(__snake_case ) if convert_value is not None else result
def lowercase__ ( __snake_case : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase_ : str = int(__snake_case )
return ComputeEnvironment(['LOCAL_MACHINE', 'AMAZON_SAGEMAKER'][value] )
def lowercase__ ( __snake_case : Optional[int] ):
'''simple docstring'''
UpperCAmelCase_ : int = int(__snake_case )
return DistributedType(['NO', 'MULTI_CPU', 'MULTI_XPU', 'MULTI_GPU', 'MULTI_NPU', 'TPU'][value] )
def lowercase__ ( __snake_case : str ):
'''simple docstring'''
UpperCAmelCase_ : Any = int(__snake_case )
return DynamoBackend(DYNAMO_BACKENDS[value] ).value
def lowercase__ ( __snake_case : Any ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = int(__snake_case )
return PrecisionType(['no', 'fp16', 'bf16', 'fp8'][value] )
def lowercase__ ( __snake_case : List[Any] ):
'''simple docstring'''
UpperCAmelCase_ : Dict = int(__snake_case )
return SageMakerDistributedType(['NO', 'DATA_PARALLEL', 'MODEL_PARALLEL'][value] )
def lowercase__ ( __snake_case : Tuple ):
'''simple docstring'''
return {"yes": True, "no": False}[value.lower()]
class lowerCamelCase (argparse.RawDescriptionHelpFormatter ):
'''simple docstring'''
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Union[str, Any]:
UpperCAmelCase_ : Tuple = super()._format_usage(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
UpperCAmelCase_ : Dict = usage.replace('<command> [<args>] ' , '' )
return usage
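# Standalone sketch of the ask-until-valid loop above, paired with the yes/no
# converter; input() is replaced by a canned answer list so the snippet runs
# non-interactively.
def to_bool(value: str) -> bool:
    return {'yes': True, 'no': False}[value.lower()]

def ask(prompt: str, answers: list, convert) -> bool:
    for raw in answers:  # stand-in for repeated input(prompt) calls
        try:
            return convert(raw)
        except Exception:
            print(f'invalid answer: {raw!r}')

print(ask('Use DeepSpeed? [yes/NO]: ', ['maybe', 'YES'], to_bool))  # True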
| 29 |
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
__UpperCAmelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
__UpperCAmelCase = '\n Examples:\n ```py\n >>> from PIL import Image\n >>> import torch\n >>> from diffusers import DiffusionPipeline\n >>> from diffusers.utils import export_to_gif, load_image\n\n >>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")\n\n >>> repo = "openai/shap-e-img2img"\n >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)\n >>> pipe = pipe.to(device)\n\n >>> guidance_scale = 3.0\n >>> image_url = "https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png"\n >>> image = load_image(image_url).convert("RGB")\n\n >>> images = pipe(\n ... image,\n ... guidance_scale=guidance_scale,\n ... num_inference_steps=64,\n ... frame_size=256,\n ... ).images\n\n >>> gif_path = export_to_gif(images[0], "corgi_3d.gif")\n ```\n'
@dataclass
class lowerCamelCase (_snake_case ):
'''simple docstring'''
_snake_case : Union[PIL.Image.Image, np.ndarray]
class lowerCamelCase (_snake_case ):
'''simple docstring'''
def __init__( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , ) -> Any:
super().__init__()
self.register_modules(
prior=_UpperCamelCase , image_encoder=_UpperCamelCase , image_processor=_UpperCamelCase , scheduler=_UpperCamelCase , renderer=_UpperCamelCase , )
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> List[Any]:
if latents is None:
UpperCAmelCase_ : str = randn_tensor(_UpperCamelCase , generator=_UpperCamelCase , device=_UpperCamelCase , dtype=_UpperCamelCase )
else:
if latents.shape != shape:
raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}" )
UpperCAmelCase_ : Tuple = latents.to(_UpperCamelCase )
UpperCAmelCase_ : Tuple = latents * scheduler.init_noise_sigma
return latents
def __UpperCAmelCase ( self , _UpperCamelCase=0 ) -> Union[str, Any]:
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`' )
UpperCAmelCase_ : int = torch.device(f"cuda:{gpu_id}" )
UpperCAmelCase_ : int = [self.image_encoder, self.prior]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(_UpperCamelCase , _UpperCamelCase )
@property
def __UpperCAmelCase ( self ) -> int:
if self.device != torch.device('meta' ) or not hasattr(self.image_encoder , '_hf_hook' ):
return self.device
for module in self.image_encoder.modules():
if (
hasattr(_UpperCamelCase , '_hf_hook' )
and hasattr(module._hf_hook , 'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , ) -> str:
if isinstance(_UpperCamelCase , _UpperCamelCase ) and isinstance(image[0] , torch.Tensor ):
UpperCAmelCase_ : int = torch.cat(_UpperCamelCase , axis=0 ) if image[0].ndim == 4 else torch.stack(_UpperCamelCase , axis=0 )
if not isinstance(_UpperCamelCase , torch.Tensor ):
UpperCAmelCase_ : Optional[int] = self.image_processor(_UpperCamelCase , return_tensors='pt' ).pixel_values[0].unsqueeze(0 )
UpperCAmelCase_ : Tuple = image.to(dtype=self.image_encoder.dtype , device=_UpperCamelCase )
UpperCAmelCase_ : Optional[Any] = self.image_encoder(_UpperCamelCase )['last_hidden_state']
UpperCAmelCase_ : Union[str, Any] = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256
UpperCAmelCase_ : List[str] = image_embeds.repeat_interleave(_UpperCamelCase , dim=0 )
if do_classifier_free_guidance:
UpperCAmelCase_ : Dict = torch.zeros_like(_UpperCamelCase )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
UpperCAmelCase_ : Optional[int] = torch.cat([negative_image_embeds, image_embeds] )
return image_embeds
@torch.no_grad()
@replace_example_docstring(_UpperCamelCase )
def __call__( self , _UpperCamelCase , _UpperCamelCase = 1 , _UpperCamelCase = 2_5 , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = 4.0 , _UpperCamelCase = 6_4 , _UpperCamelCase = "pil" , _UpperCamelCase = True , ) -> Union[str, Any]:
if isinstance(_UpperCamelCase , PIL.Image.Image ):
UpperCAmelCase_ : Tuple = 1
elif isinstance(_UpperCamelCase , torch.Tensor ):
UpperCAmelCase_ : str = image.shape[0]
elif isinstance(_UpperCamelCase , _UpperCamelCase ) and isinstance(image[0] , (torch.Tensor, PIL.Image.Image) ):
UpperCAmelCase_ : Optional[int] = len(_UpperCamelCase )
else:
raise ValueError(
f"`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(_UpperCamelCase )}" )
UpperCAmelCase_ : Tuple = self._execution_device
UpperCAmelCase_ : str = batch_size * num_images_per_prompt
UpperCAmelCase_ : str = guidance_scale > 1.0
UpperCAmelCase_ : str = self._encode_image(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# prior
self.scheduler.set_timesteps(_UpperCamelCase , device=_UpperCamelCase )
UpperCAmelCase_ : int = self.scheduler.timesteps
UpperCAmelCase_ : int = self.prior.config.num_embeddings
UpperCAmelCase_ : Any = self.prior.config.embedding_dim
UpperCAmelCase_ : List[str] = self.prepare_latents(
(batch_size, num_embeddings * embedding_dim) , image_embeds.dtype , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , self.scheduler , )
# YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
UpperCAmelCase_ : List[Any] = latents.reshape(latents.shape[0] , _UpperCamelCase , _UpperCamelCase )
for i, t in enumerate(self.progress_bar(_UpperCamelCase ) ):
# expand the latents if we are doing classifier free guidance
UpperCAmelCase_ : Tuple = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
UpperCAmelCase_ : Optional[Any] = self.scheduler.scale_model_input(_UpperCamelCase , _UpperCamelCase )
UpperCAmelCase_ : int = self.prior(
_UpperCamelCase , timestep=_UpperCamelCase , proj_embedding=_UpperCamelCase , ).predicted_image_embedding
# remove the variance
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = noise_pred.split(
scaled_model_input.shape[2] , dim=2 ) # batch_size, num_embeddings, embedding_dim
if do_classifier_free_guidance:
UpperCAmelCase_ , UpperCAmelCase_ : str = noise_pred.chunk(2 )
UpperCAmelCase_ : List[Any] = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
UpperCAmelCase_ : List[str] = self.scheduler.step(
_UpperCamelCase , timestep=_UpperCamelCase , sample=_UpperCamelCase , ).prev_sample
if output_type == "latent":
return ShapEPipelineOutput(images=_UpperCamelCase )
UpperCAmelCase_ : List[Any] = []
for i, latent in enumerate(_UpperCamelCase ):
UpperCAmelCase_ : List[str] = self.renderer.decode(
latent[None, :] , _UpperCamelCase , size=_UpperCamelCase , ray_batch_size=4_0_9_6 , n_coarse_samples=6_4 , n_fine_samples=1_2_8 , )
images.append(_UpperCamelCase )
UpperCAmelCase_ : Optional[int] = torch.stack(_UpperCamelCase )
if output_type not in ["np", "pil"]:
raise ValueError(f"Only the output types `pil` and `np` are supported not output_type={output_type}" )
UpperCAmelCase_ : Dict = images.cpu().numpy()
if output_type == "pil":
UpperCAmelCase_ : List[str] = [self.numpy_to_pil(_UpperCamelCase ) for image in images]
# Offload last model to CPU
if hasattr(self , 'final_offload_hook' ) and self.final_offload_hook is not None:
self.final_offload_hook.offload()
if not return_dict:
return (images,)
return ShapEPipelineOutput(images=_UpperCamelCase )
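# Standalone sketch of the classifier-free-guidance combine in the denoising
# loop above: push the conditional prediction away from the unconditional one
# by `guidance_scale`.
import torch

noise_uncond = torch.zeros(1, 4)
noise_cond = torch.ones(1, 4)
guidance_scale = 4.0
guided = noise_uncond + guidance_scale * (noise_cond - noise_uncond)
print(guided)  # tensor([[4., 4., 4., 4.]]) -- amplified beyond the conditional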
| 29 | 1 |
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
__UpperCAmelCase = importlib.util.find_spec('s3fs') is not None
if _has_s3fs:
from .s3filesystem import S3FileSystem # noqa: F401
__UpperCAmelCase = [
compression.Bz2FileSystem,
compression.GzipFileSystem,
compression.Lz4FileSystem,
compression.XzFileSystem,
compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
warnings.warn(F'A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.')
fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def lowercase__ ( __snake_case : str ):
'''simple docstring'''
if "://" in dataset_path:
UpperCAmelCase_ : int = dataset_path.split('://' )[1]
return dataset_path
def lowercase__ ( __snake_case : fsspec.AbstractFileSystem ):
'''simple docstring'''
if fs is not None and fs.protocol != "file":
return True
else:
return False
def lowercase__ ( __snake_case : fsspec.AbstractFileSystem , __snake_case : str , __snake_case : str ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = not is_remote_filesystem(__snake_case )
if is_local:
# LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
shutil.move(fs._strip_protocol(__snake_case ) , fs._strip_protocol(__snake_case ) )
else:
fs.mv(__snake_case , __snake_case , recursive=__snake_case )
def lowercase__ ( ):
'''simple docstring'''
if hasattr(fsspec.asyn , 'reset_lock' ):
# for future fsspec>2022.05.0
fsspec.asyn.reset_lock()
else:
UpperCAmelCase_ : Optional[Any] = None
UpperCAmelCase_ : Union[str, Any] = None
UpperCAmelCase_ : int = threading.Lock()
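# Hedged demo of the two helpers above, using fsspec's built-in in-memory
# filesystem: any protocol other than "file" is treated as remote.
import fsspec

fs = fsspec.filesystem('memory')
print(fs.protocol)                            # 'memory' -> remote by the check above
print('s3://bucket/dataset'.split('://')[1])  # 'bucket/dataset', as in the path helper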
| 29 |
import random
import unittest
import torch
from diffusers import IFImgaImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class lowerCamelCase (_snake_case , _snake_case , unittest.TestCase ):
'''simple docstring'''
_snake_case : Union[str, Any] = IFImgaImgSuperResolutionPipeline
_snake_case : Union[str, Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''width''', '''height'''}
_snake_case : List[Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'''original_image'''} )
_snake_case : List[str] = PipelineTesterMixin.required_optional_params - {'''latents'''}
def __UpperCAmelCase ( self ) -> Optional[Any]:
return self._get_superresolution_dummy_components()
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase=0 ) -> Any:
if str(_UpperCamelCase ).startswith('mps' ):
UpperCAmelCase_ : List[Any] = torch.manual_seed(_UpperCamelCase )
else:
UpperCAmelCase_ : int = torch.Generator(device=_UpperCamelCase ).manual_seed(_UpperCamelCase )
UpperCAmelCase_ : List[Any] = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(_UpperCamelCase ) ).to(_UpperCamelCase )
UpperCAmelCase_ : Dict = floats_tensor((1, 3, 1_6, 1_6) , rng=random.Random(_UpperCamelCase ) ).to(_UpperCamelCase )
UpperCAmelCase_ : Tuple = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'original_image': original_image,
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def __UpperCAmelCase ( self ) -> Any:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def __UpperCAmelCase ( self ) -> Dict:
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' )
def __UpperCAmelCase ( self ) -> str:
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_float16(expected_max_diff=1E-1 )
def __UpperCAmelCase ( self ) -> List[Any]:
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def __UpperCAmelCase ( self ) -> Union[str, Any]:
self._test_save_load_local()
def __UpperCAmelCase ( self ) -> Dict:
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
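# Standalone sketch of the generator handling in the inputs helper above:
# MPS takes the CPU-seeded default generator, other devices take a
# device-local torch.Generator.
import torch

def make_generator(device: str, seed: int = 0):
    if device.startswith('mps'):
        return torch.manual_seed(seed)
    return torch.Generator(device=device).manual_seed(seed)

gen = make_generator('cpu', seed=0)
print(torch.rand(2, generator=gen))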
| 29 | 1 |
import json
import os
import tempfile
from unittest.mock import patch
import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import DistributedType, infer_auto_device_map, init_empty_weights
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState, PartialState
from accelerate.test_utils import require_bnb, require_multi_gpu, slow
from accelerate.test_utils.testing import AccelerateTestCase, require_cuda
from accelerate.utils import patch_environment
def lowercase__ ( ):
'''simple docstring'''
UpperCAmelCase_ : str = torch.nn.Linear(2 , 4 )
UpperCAmelCase_ : Tuple = torch.optim.AdamW(model.parameters() , lr=1.0 )
UpperCAmelCase_ : List[Any] = torch.optim.lr_scheduler.OneCycleLR(__snake_case , max_lr=0.01 , steps_per_epoch=2 , epochs=1 )
UpperCAmelCase_ : List[str] = DataLoader(TensorDataset(torch.tensor([1, 2, 3] ) ) )
UpperCAmelCase_ : Dict = DataLoader(TensorDataset(torch.tensor([4, 5, 6] ) ) )
return model, optimizer, scheduler, train_dl, valid_dl
def lowercase__ ( __snake_case : Tuple ):
'''simple docstring'''
return (model.weight.abs().sum() + model.bias.abs().sum()).item()
def lowercase__ ( __snake_case : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase_ : int = torch.nn.Linear(*tuple(model.weight.T.shape ) ).state_dict()
model.load_state_dict(__snake_case )
class lowerCamelCase (_snake_case ):
'''simple docstring'''
@require_cuda
def __UpperCAmelCase ( self ) -> str:
UpperCAmelCase_ : Union[str, Any] = Accelerator()
assert PartialState._shared_state["_cpu"] is False
assert PartialState._shared_state["device"].type == "cuda"
with self.assertRaises(_UpperCamelCase ):
UpperCAmelCase_ : List[str] = Accelerator(cpu=_UpperCamelCase )
def __UpperCAmelCase ( self ) -> str:
UpperCAmelCase_ : Optional[int] = Accelerator()
UpperCAmelCase_ : Dict = GradientState()
assert state.num_steps == 1
UpperCAmelCase_ : Any = 4
assert state.num_steps == 4
assert state.sync_gradients is True
UpperCAmelCase_ : Tuple = False
assert state.sync_gradients is False
GradientState._reset_state()
def __UpperCAmelCase ( self ) -> str:
UpperCAmelCase_ : Union[str, Any] = Accelerator()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = create_components()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Any = accelerator.prepare(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
self.assertTrue(prepared_model in accelerator._models )
self.assertTrue(prepared_optimizer in accelerator._optimizers )
self.assertTrue(prepared_scheduler in accelerator._schedulers )
self.assertTrue(prepared_train_dl in accelerator._dataloaders )
self.assertTrue(prepared_valid_dl in accelerator._dataloaders )
def __UpperCAmelCase ( self ) -> Dict:
UpperCAmelCase_ : List[str] = Accelerator()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = create_components()
accelerator.prepare(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
accelerator.free_memory()
self.assertTrue(len(accelerator._models ) == 0 )
self.assertTrue(len(accelerator._optimizers ) == 0 )
self.assertTrue(len(accelerator._schedulers ) == 0 )
self.assertTrue(len(accelerator._dataloaders ) == 0 )
def __UpperCAmelCase ( self ) -> Tuple:
PartialState._reset_state()
# Mock torch.cuda.set_device to avoid an exception as the device doesn't exist
def noop(*_UpperCamelCase , **_UpperCamelCase ):
pass
with patch('torch.cuda.set_device' , _UpperCamelCase ), patch_environment(ACCELERATE_TORCH_DEVICE='cuda:64' ):
UpperCAmelCase_ : Any = Accelerator()
self.assertEqual(str(accelerator.state.device ) , 'cuda:64' )
def __UpperCAmelCase ( self ) -> Any:
UpperCAmelCase_ : Tuple = Accelerator()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : int = create_components()
accelerator.prepare(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
UpperCAmelCase_ : Dict = get_signature(_UpperCamelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(_UpperCamelCase )
# make sure random weights don't match
load_random_weights(_UpperCamelCase )
self.assertTrue(abs(model_signature - get_signature(_UpperCamelCase ) ) > 1E-3 )
# make sure loaded weights match
accelerator.load_state(_UpperCamelCase )
self.assertTrue(abs(model_signature - get_signature(_UpperCamelCase ) ) < 1E-3 )
def __UpperCAmelCase ( self ) -> str:
UpperCAmelCase_ : List[Any] = Accelerator()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Tuple = create_components()
accelerator.prepare(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
UpperCAmelCase_ : List[Any] = get_signature(_UpperCamelCase )
# saving hook
def save_config(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
UpperCAmelCase_ : str = {'class_name': models[0].__class__.__name__}
with open(os.path.join(_UpperCamelCase , 'data.json' ) , 'w' ) as f:
json.dump(_UpperCamelCase , _UpperCamelCase )
# loading hook
def load_config(_UpperCamelCase , _UpperCamelCase ):
with open(os.path.join(_UpperCamelCase , 'data.json' ) , 'r' ) as f:
UpperCAmelCase_ : Optional[Any] = json.load(_UpperCamelCase )
UpperCAmelCase_ : str = config['class_name']
UpperCAmelCase_ : Union[str, Any] = accelerator.register_save_state_pre_hook(_UpperCamelCase )
UpperCAmelCase_ : Any = accelerator.register_load_state_pre_hook(_UpperCamelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(_UpperCamelCase )
# make sure random weights don't match with hooks
load_random_weights(_UpperCamelCase )
self.assertTrue(abs(model_signature - get_signature(_UpperCamelCase ) ) > 1E-3 )
# random class name to verify correct one is loaded
UpperCAmelCase_ : List[str] = 'random'
# make sure loaded weights match with hooks
accelerator.load_state(_UpperCamelCase )
self.assertTrue(abs(model_signature - get_signature(_UpperCamelCase ) ) < 1E-3 )
# mode.class_name is loaded from config
self.assertTrue(model.class_name == model.__class__.__name__ )
# remove hooks
save_hook.remove()
load_hook.remove()
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(_UpperCamelCase )
# make sure random weights don't match with hooks removed
load_random_weights(_UpperCamelCase )
self.assertTrue(abs(model_signature - get_signature(_UpperCamelCase ) ) > 1E-3 )
# random class name to verify correct one is loaded
UpperCAmelCase_ : Union[str, Any] = 'random'
# make sure loaded weights match with hooks removed
accelerator.load_state(_UpperCamelCase )
self.assertTrue(abs(model_signature - get_signature(_UpperCamelCase ) ) < 1E-3 )
# mode.class_name is NOT loaded from config
self.assertTrue(model.class_name != model.__class__.__name__ )
def __UpperCAmelCase ( self ) -> Tuple:
UpperCAmelCase_ : Optional[int] = Accelerator()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = create_components()
UpperCAmelCase_ : Tuple = None
# This should work
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = accelerator.prepare(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
self.assertTrue(dummy_obj is None )
def __UpperCAmelCase ( self ) -> Any:
UpperCAmelCase_ : List[Any] = Accelerator()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : int = create_components()
UpperCAmelCase_ : List[Any] = [1, 2, 3]
# This should work
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = accelerator.prepare(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
self.assertEqual(
getattr(_UpperCamelCase , '_is_accelerate_prepared' , _UpperCamelCase ) , _UpperCamelCase , 'Dummy object should have `_is_accelerate_prepared` set to `True`' , )
self.assertEqual(
getattr(_UpperCamelCase , '_is_accelerate_prepared' , _UpperCamelCase ) , _UpperCamelCase , 'Model is missing `_is_accelerator_prepared` or is set to `False`' , )
self.assertEqual(
getattr(_UpperCamelCase , '_is_accelerate_prepared' , _UpperCamelCase ) , _UpperCamelCase , 'Optimizer is missing `_is_accelerator_prepared` or is set to `False`' , )
self.assertEqual(
getattr(_UpperCamelCase , '_is_accelerate_prepared' , _UpperCamelCase ) , _UpperCamelCase , 'Scheduler is missing `_is_accelerator_prepared` or is set to `False`' , )
self.assertEqual(
getattr(_UpperCamelCase , '_is_accelerate_prepared' , _UpperCamelCase ) , _UpperCamelCase , 'Train Dataloader is missing `_is_accelerator_prepared` or is set to `False`' , )
self.assertEqual(
getattr(_UpperCamelCase , '_is_accelerate_prepared' , _UpperCamelCase ) , _UpperCamelCase , 'Valid Dataloader is missing `_is_accelerator_prepared` or is set to `False`' , )
@slow
@require_bnb
def __UpperCAmelCase ( self ) -> Dict:
from transformers import AutoModelForCausalLM
UpperCAmelCase_ : Optional[Any] = AutoModelForCausalLM.from_pretrained(
'EleutherAI/gpt-neo-125m' , load_in_8bit=_UpperCamelCase , device_map={'': 0} , )
UpperCAmelCase_ : List[str] = Accelerator()
# This should work
UpperCAmelCase_ : Union[str, Any] = accelerator.prepare(_UpperCamelCase )
@slow
@require_bnb
def __UpperCAmelCase ( self ) -> Any:
from transformers import AutoModelForCausalLM
UpperCAmelCase_ : Optional[int] = Accelerator()
with init_empty_weights():
UpperCAmelCase_ : List[Any] = AutoModelForCausalLM.from_pretrained(
'EleutherAI/gpt-neo-125m' , )
model.tie_weights()
UpperCAmelCase_ : Any = infer_auto_device_map(_UpperCamelCase )
UpperCAmelCase_ : int = 'cpu'
UpperCAmelCase_ : Optional[Any] = AutoModelForCausalLM.from_pretrained(
'EleutherAI/gpt-neo-125m' , device_map=_UpperCamelCase , load_in_8bit=_UpperCamelCase , llm_int8_enable_fp32_cpu_offload=_UpperCamelCase )
# This should not work and get value error
with self.assertRaises(_UpperCamelCase ):
UpperCAmelCase_ : Optional[Any] = accelerator.prepare(_UpperCamelCase )
@slow
@require_bnb
@require_multi_gpu
def __UpperCAmelCase ( self ) -> Optional[Any]:
from transformers import AutoModelForCausalLM
UpperCAmelCase_ : List[Any] = {'distributed_type': DistributedType.MULTI_GPU}
with init_empty_weights():
UpperCAmelCase_ : List[str] = AutoModelForCausalLM.from_pretrained(
'EleutherAI/gpt-neo-125m' , )
model.tie_weights()
UpperCAmelCase_ : Optional[Any] = infer_auto_device_map(_UpperCamelCase )
UpperCAmelCase_ : Any = 1
UpperCAmelCase_ : Tuple = AutoModelForCausalLM.from_pretrained(
'EleutherAI/gpt-neo-125m' , load_in_8bit=_UpperCamelCase , device_map=_UpperCamelCase , )
UpperCAmelCase_ : str = Accelerator()
# This should not work and get value error
with self.assertRaises(_UpperCamelCase ):
UpperCAmelCase_ : str = accelerator.prepare(_UpperCamelCase )
PartialState._reset_state()
@slow
@require_bnb
@require_multi_gpu
def __UpperCAmelCase ( self ) -> Tuple:
from transformers import AutoModelForCausalLM
with init_empty_weights():
UpperCAmelCase_ : List[Any] = AutoModelForCausalLM.from_pretrained(
'EleutherAI/gpt-neo-125m' , )
UpperCAmelCase_ : Tuple = infer_auto_device_map(_UpperCamelCase )
UpperCAmelCase_ : int = 1
UpperCAmelCase_ : Union[str, Any] = AutoModelForCausalLM.from_pretrained(
'EleutherAI/gpt-neo-125m' , load_in_abit=_UpperCamelCase , device_map=_UpperCamelCase , )
UpperCAmelCase_ : int = Accelerator()
# This should work
UpperCAmelCase_ : Any = accelerator.prepare(_UpperCamelCase )
@require_cuda
def __UpperCAmelCase ( self ) -> Optional[Any]:
UpperCAmelCase_ : str = torch.nn.Linear(1_0 , 1_0 )
UpperCAmelCase_ : Dict = torch.optim.SGD(model.parameters() , lr=0.01 )
UpperCAmelCase_ : Any = Accelerator(cpu=_UpperCamelCase )
UpperCAmelCase_ : Union[str, Any] = accelerator.prepare(_UpperCamelCase )
| 29 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__UpperCAmelCase = {
'configuration_time_series_transformer': [
'TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'TimeSeriesTransformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TimeSeriesTransformerForPrediction',
'TimeSeriesTransformerModel',
'TimeSeriesTransformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 29 | 1 |
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
__UpperCAmelCase = get_tests_dir('fixtures/test_sentencepiece.model')
__UpperCAmelCase = {'target_lang': 'fi', 'source_lang': 'en'}
__UpperCAmelCase = '>>zh<<'
__UpperCAmelCase = 'Helsinki-NLP/'
if is_torch_available():
__UpperCAmelCase = 'pt'
elif is_tf_available():
__UpperCAmelCase = 'tf'
else:
__UpperCAmelCase = 'jax'
@require_sentencepiece
class lowerCamelCase (_snake_case , unittest.TestCase ):
'''simple docstring'''
_snake_case : Any = MarianTokenizer
_snake_case : Union[str, Any] = False
_snake_case : Optional[Any] = True
def __UpperCAmelCase ( self ) -> Optional[int]:
super().setUp()
UpperCAmelCase_ : List[Any] = ['</s>', '<unk>', '▁This', '▁is', '▁a', '▁t', 'est', '\u0120', '<pad>']
UpperCAmelCase_ : str = dict(zip(_UpperCamelCase , range(len(_UpperCamelCase ) ) ) )
UpperCAmelCase_ : Any = Path(self.tmpdirname )
save_json(_UpperCamelCase , save_dir / VOCAB_FILES_NAMES['vocab'] )
save_json(_UpperCamelCase , save_dir / VOCAB_FILES_NAMES['tokenizer_config_file'] )
if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
copyfile(_UpperCamelCase , save_dir / VOCAB_FILES_NAMES['source_spm'] )
copyfile(_UpperCamelCase , save_dir / VOCAB_FILES_NAMES['target_spm'] )
UpperCAmelCase_ : Optional[int] = MarianTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def __UpperCAmelCase ( self , **_UpperCamelCase ) -> MarianTokenizer:
return MarianTokenizer.from_pretrained(self.tmpdirname , **_UpperCamelCase )
def __UpperCAmelCase ( self , _UpperCamelCase ) -> Dict:
return (
"This is a test",
"This is a test",
)
def __UpperCAmelCase ( self ) -> Dict:
UpperCAmelCase_ : int = '</s>'
UpperCAmelCase_ : List[str] = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_UpperCamelCase ) , _UpperCamelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_UpperCamelCase ) , _UpperCamelCase )
def __UpperCAmelCase ( self ) -> Union[str, Any]:
UpperCAmelCase_ : Optional[Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '</s>' )
self.assertEqual(vocab_keys[1] , '<unk>' )
self.assertEqual(vocab_keys[-1] , '<pad>' )
self.assertEqual(len(_UpperCamelCase ) , 9 )
def __UpperCAmelCase ( self ) -> Optional[Any]:
self.assertEqual(self.get_tokenizer().vocab_size , 9 )
def __UpperCAmelCase ( self ) -> List[Any]:
UpperCAmelCase_ : int = MarianTokenizer.from_pretrained(f"{ORG_NAME}opus-mt-en-de" )
UpperCAmelCase_ : Tuple = en_de_tokenizer(['I am a small frog'] , return_tensors=_UpperCamelCase )
self.assertIsInstance(_UpperCamelCase , _UpperCamelCase )
UpperCAmelCase_ : Any = [3_8, 1_2_1, 1_4, 6_9_7, 3_8_8_4_8, 0]
self.assertListEqual(_UpperCamelCase , batch.input_ids[0] )
UpperCAmelCase_ : Optional[int] = tempfile.mkdtemp()
en_de_tokenizer.save_pretrained(_UpperCamelCase )
UpperCAmelCase_ : Tuple = [x.name for x in Path(_UpperCamelCase ).glob('*' )]
self.assertIn('source.spm' , _UpperCamelCase )
MarianTokenizer.from_pretrained(_UpperCamelCase )
def __UpperCAmelCase ( self ) -> List[Any]:
UpperCAmelCase_ : List[Any] = self.get_tokenizer()
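        # Inputs far beyond the model max length should be truncated down to shape (batch, 512).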
UpperCAmelCase_ : int = tok(
['I am a small frog' * 1_0_0_0, 'I am a small frog'] , padding=_UpperCamelCase , truncation=_UpperCamelCase , return_tensors=_UpperCamelCase )
self.assertIsInstance(_UpperCamelCase , _UpperCamelCase )
self.assertEqual(batch.input_ids.shape , (2, 5_1_2) )
def __UpperCAmelCase ( self ) -> str:
UpperCAmelCase_ : Optional[Any] = self.get_tokenizer()
UpperCAmelCase_ : Any = tok(['I am a tiny frog', 'I am a small frog'] , padding=_UpperCamelCase , return_tensors=_UpperCamelCase )
self.assertIsInstance(_UpperCamelCase , _UpperCamelCase )
self.assertEqual(batch_smaller.input_ids.shape , (2, 1_0) )
@slow
def __UpperCAmelCase ( self ) -> Optional[Any]:
# fmt: off
UpperCAmelCase_ : Any = {'input_ids': [[4_3_4_9_5, 4_6_2, 2_0, 4_2_1_6_4, 1_3_6_9, 5_2, 4_6_4, 1_3_2, 1_7_0_3, 4_9_2, 1_3, 7_4_9_1, 3_8_9_9_9, 6, 8, 4_6_4, 1_3_2, 1_7_0_3, 4_9_2, 1_3, 4_6_6_9, 3_7_8_6_7, 1_3, 7_5_2_5, 2_7, 1_5_9_3, 9_8_8, 1_3, 3_3_9_7_2, 7_0_2_9, 6, 2_0, 8_2_5_1, 3_8_3, 2, 2_7_0, 5_8_6_6, 3_7_8_8, 2, 2_3_5_3, 8_2_5_1, 1_2_3_3_8, 2, 1_3_9_5_8, 3_8_7, 2, 3_6_2_9, 6_9_5_3, 1_8_8, 2_9_0_0, 2, 1_3_9_5_8, 8_0_1_1, 1_1_5_0_1, 2_3, 8_4_6_0, 4_0_7_3, 3_4_0_0_9, 2_0, 4_3_5, 1_1_4_3_9, 2_7, 8, 8_4_6_0, 4_0_7_3, 6_0_0_4, 2_0, 9_9_8_8, 3_7_5, 2_7, 3_3, 2_6_6, 1_9_4_5, 1_0_7_6, 1_3_5_0, 3_7_8_6_7, 3_2_8_8, 5, 5_7_7, 1_0_7_6, 4_3_7_4, 8, 5_0_8_2, 5, 2_6_4_5_3, 2_5_7, 5_5_6, 4_0_3, 2, 2_4_2, 1_3_2, 3_8_3, 3_1_6, 4_9_2, 8, 1_0_7_6_7, 6, 3_1_6, 3_0_4, 4_2_3_9, 3, 0], [1_4_8, 1_5_7_2_2, 1_9, 1_8_3_9, 1_2, 1_3_5_0, 1_3, 2_2_3_2_7, 5_0_8_2, 5_4_1_8, 4_7_5_6_7, 3_5_9_3_8, 5_9, 3_1_8, 1_9_5_5_2, 1_0_8, 2_1_8_3, 5_4, 1_4_9_7_6, 4_8_3_5, 3_2, 5_4_7, 1_1_1_4, 8, 3_1_5, 2_4_1_7, 5, 9_2, 1_9_0_8_8, 3, 0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0], [3_6, 6_3_9_5, 1_2_5_7_0, 3_9_1_4_7, 1_1_5_9_7, 6, 2_6_6, 4, 4_5_4_0_5, 7_2_9_6, 3, 0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_UpperCamelCase , model_name='Helsinki-NLP/opus-mt-en-de' , revision='1a8c2263da11e68e50938f97e10cd57820bd504c' , decode_kwargs={'use_source_tokenizer': True} , )
def __UpperCAmelCase ( self ) -> Union[str, Any]:
UpperCAmelCase_ : Optional[Any] = MarianTokenizer.from_pretrained('hf-internal-testing/test-marian-two-vocabs' )
UpperCAmelCase_ : Optional[int] = 'Tämä on testi'
UpperCAmelCase_ : str = 'This is a test'
UpperCAmelCase_ : Any = [7_6, 7, 2_0_4_7, 2]
UpperCAmelCase_ : List[str] = [6_9, 1_2, 1_1, 9_4_0, 2]
UpperCAmelCase_ : Any = tokenizer(_UpperCamelCase ).input_ids
self.assertListEqual(_UpperCamelCase , _UpperCamelCase )
UpperCAmelCase_ : Optional[int] = tokenizer(text_target=_UpperCamelCase ).input_ids
self.assertListEqual(_UpperCamelCase , _UpperCamelCase )
UpperCAmelCase_ : str = tokenizer.decode(_UpperCamelCase , skip_special_tokens=_UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
| 29 |
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
__UpperCAmelCase = logging.get_logger(__name__)
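# Maps onnxruntime tensor element-type strings to the numpy dtypes used for session inputs and outputs.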
__UpperCAmelCase = {
'tensor(bool)': np.bool_,
    'tensor(int8)': np.int8,
    'tensor(uint8)': np.uint8,
    'tensor(int16)': np.int16,
    'tensor(uint16)': np.uint16,
    'tensor(int32)': np.int32,
    'tensor(uint32)': np.uint32,
    'tensor(int64)': np.int64,
    'tensor(uint64)': np.uint64,
    'tensor(float16)': np.float16,
    'tensor(float)': np.float32,
    'tensor(double)': np.float64,
}
class lowerCamelCase :
'''simple docstring'''
def __init__( self , _UpperCamelCase=None , **_UpperCamelCase ) -> Dict:
logger.info('`diffusers.OnnxRuntimeModel` is experimental and might change in the future.' )
UpperCAmelCase_ : Any = model
UpperCAmelCase_ : int = kwargs.get('model_save_dir' , _UpperCamelCase )
UpperCAmelCase_ : List[Any] = kwargs.get('latest_model_name' , _UpperCamelCase )
def __call__( self , **_UpperCamelCase ) -> str:
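        # onnxruntime only accepts plain numpy arrays, so cast every input before running the session.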
UpperCAmelCase_ : Optional[int] = {k: np.array(_UpperCamelCase ) for k, v in kwargs.items()}
return self.model.run(_UpperCamelCase , _UpperCamelCase )
@staticmethod
def __UpperCAmelCase ( _UpperCamelCase , _UpperCamelCase=None , _UpperCamelCase=None ) -> List[Any]:
if provider is None:
logger.info('No onnxruntime provider specified, using CPUExecutionProvider' )
UpperCAmelCase_ : List[str] = 'CPUExecutionProvider'
return ort.InferenceSession(_UpperCamelCase , providers=[provider] , sess_options=_UpperCamelCase )
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = None , **_UpperCamelCase ) -> Dict:
UpperCAmelCase_ : Any = file_name if file_name is not None else ONNX_WEIGHTS_NAME
UpperCAmelCase_ : Optional[Any] = self.model_save_dir.joinpath(self.latest_model_name )
UpperCAmelCase_ : str = Path(_UpperCamelCase ).joinpath(_UpperCamelCase )
try:
shutil.copyfile(_UpperCamelCase , _UpperCamelCase )
except shutil.SameFileError:
pass
# copy external weights (for models >2GB)
UpperCAmelCase_ : Optional[Any] = self.model_save_dir.joinpath(_UpperCamelCase )
if src_path.exists():
UpperCAmelCase_ : List[Any] = Path(_UpperCamelCase ).joinpath(_UpperCamelCase )
try:
shutil.copyfile(_UpperCamelCase , _UpperCamelCase )
except shutil.SameFileError:
pass
def __UpperCAmelCase ( self , _UpperCamelCase , **_UpperCamelCase , ) -> List[str]:
if os.path.isfile(_UpperCamelCase ):
logger.error(f"Provided path ({save_directory}) should be a directory, not a file" )
return
os.makedirs(_UpperCamelCase , exist_ok=_UpperCamelCase )
# saving model weights/files
self._save_pretrained(_UpperCamelCase , **_UpperCamelCase )
@classmethod
def __UpperCAmelCase ( cls , _UpperCamelCase , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = False , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , **_UpperCamelCase , ) -> List[str]:
UpperCAmelCase_ : List[str] = file_name if file_name is not None else ONNX_WEIGHTS_NAME
# load model from local directory
if os.path.isdir(_UpperCamelCase ):
UpperCAmelCase_ : Union[str, Any] = OnnxRuntimeModel.load_model(
os.path.join(_UpperCamelCase , _UpperCamelCase ) , provider=_UpperCamelCase , sess_options=_UpperCamelCase )
UpperCAmelCase_ : Tuple = Path(_UpperCamelCase )
# load model from hub
else:
# download model
UpperCAmelCase_ : List[str] = hf_hub_download(
repo_id=_UpperCamelCase , filename=_UpperCamelCase , use_auth_token=_UpperCamelCase , revision=_UpperCamelCase , cache_dir=_UpperCamelCase , force_download=_UpperCamelCase , )
UpperCAmelCase_ : Union[str, Any] = Path(_UpperCamelCase ).parent
UpperCAmelCase_ : List[str] = Path(_UpperCamelCase ).name
UpperCAmelCase_ : Union[str, Any] = OnnxRuntimeModel.load_model(_UpperCamelCase , provider=_UpperCamelCase , sess_options=_UpperCamelCase )
return cls(model=_UpperCamelCase , **_UpperCamelCase )
@classmethod
def __UpperCAmelCase ( cls , _UpperCamelCase , _UpperCamelCase = True , _UpperCamelCase = None , _UpperCamelCase = None , **_UpperCamelCase , ) -> Optional[int]:
UpperCAmelCase_ : List[str] = None
if len(str(_UpperCamelCase ).split('@' ) ) == 2:
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = model_id.split('@' )
return cls._from_pretrained(
model_id=_UpperCamelCase , revision=_UpperCamelCase , cache_dir=_UpperCamelCase , force_download=_UpperCamelCase , use_auth_token=_UpperCamelCase , **_UpperCamelCase , )
| 29 | 1 |
from functools import lru_cache
def lowercase__ ( __snake_case : int ):
'''simple docstring'''
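    # Trial division: repeatedly divide out each factor and record the distinct primes of n.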
UpperCAmelCase_ : Tuple = 2
UpperCAmelCase_ : Union[str, Any] = set()
while i * i <= n:
if n % i:
i += 1
else:
n //= i
factors.add(__snake_case )
if n > 1:
factors.add(__snake_case )
return factors
@lru_cache
def lowercase__ ( __snake_case : int ):
'''simple docstring'''
return len(unique_prime_factors(__snake_case ) )
def lowercase__ ( __snake_case : list ):
'''simple docstring'''
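    # True when the list is empty or every element is identical.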
return len(set(__snake_case ) ) in (0, 1)
def lowercase__ ( __snake_case : int ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = 2
while True:
# Increment each value of a generated range
UpperCAmelCase_ : int = [base + i for i in range(__snake_case )]
        # Run elements through our unique_prime_factors function.
# Append our target number to the end.
UpperCAmelCase_ : Optional[Any] = [upf_len(__snake_case ) for x in group]
checker.append(__snake_case )
# If all numbers in the list are equal, return the group variable.
if equality(__snake_case ):
return group
# Increment our base variable by 1
base += 1
def lowercase__ ( __snake_case : int = 4 ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = run(__snake_case )
return results[0] if len(__snake_case ) else None
if __name__ == "__main__":
print(solution())
| 29 |
import contextlib
import csv
import json
import os
import sqlite3
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope='session' )
def lowercase__ ( ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = 10
UpperCAmelCase_ : Tuple = datasets.Features(
{
'tokens': datasets.Sequence(datasets.Value('string' ) ),
'labels': datasets.Sequence(datasets.ClassLabel(names=['negative', 'positive'] ) ),
'answers': datasets.Sequence(
{
'text': datasets.Value('string' ),
'answer_start': datasets.Value('int32' ),
} ),
'id': datasets.Value('int64' ),
} )
UpperCAmelCase_ : Tuple = datasets.Dataset.from_dict(
{
'tokens': [['foo'] * 5] * n,
'labels': [[1] * 5] * n,
'answers': [{'answer_start': [97], 'text': ['1976']}] * 10,
'id': list(range(__snake_case ) ),
} , features=__snake_case , )
return dataset
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Optional[Any] , __snake_case : List[str] ):
'''simple docstring'''
UpperCAmelCase_ : str = str(tmp_path_factory.mktemp('data' ) / 'file.arrow' )
dataset.map(cache_file_name=__snake_case )
return filename
# FILE_CONTENT + files
__UpperCAmelCase = '\\n Text data.\n Second line of data.'
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = tmp_path_factory.mktemp('data' ) / 'file.txt'
UpperCAmelCase_ : Tuple = FILE_CONTENT
with open(__snake_case , 'w' ) as f:
f.write(__snake_case )
return filename
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : List[str] ):
'''simple docstring'''
    import bz2
    UpperCAmelCase_ : Optional[int] = tmp_path_factory.mktemp('data' ) / 'file.txt.bz2'
    UpperCAmelCase_ : str = bytes(__snake_case , 'utf-8' )
    with bz2.open(__snake_case , 'wb' ) as f:
f.write(__snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Any ):
'''simple docstring'''
import gzip
UpperCAmelCase_ : Optional[Any] = str(tmp_path_factory.mktemp('data' ) / 'file.txt.gz' )
UpperCAmelCase_ : Dict = bytes(__snake_case , 'utf-8' )
with gzip.open(__snake_case , 'wb' ) as f:
f.write(__snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Dict ):
'''simple docstring'''
if datasets.config.LZ4_AVAILABLE:
        import lz4.frame
        UpperCAmelCase_ : Any = tmp_path_factory.mktemp('data' ) / 'file.txt.lz4'
        UpperCAmelCase_ : Any = bytes(__snake_case , 'utf-8' )
        with lz4.frame.open(__snake_case , 'wb' ) as f:
f.write(__snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Tuple , __snake_case : List[Any] ):
'''simple docstring'''
if datasets.config.PY7ZR_AVAILABLE:
        import py7zr
        UpperCAmelCase_ : Optional[Any] = tmp_path_factory.mktemp('data' ) / 'file.txt.7z'
        with py7zr.SevenZipFile(__snake_case , 'w' ) as archive:
archive.write(__snake_case , arcname=os.path.basename(__snake_case ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : List[str] , __snake_case : List[Any] ):
'''simple docstring'''
import tarfile
UpperCAmelCase_ : Any = tmp_path_factory.mktemp('data' ) / 'file.txt.tar'
with tarfile.TarFile(__snake_case , 'w' ) as f:
f.add(__snake_case , arcname=os.path.basename(__snake_case ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : str ):
'''simple docstring'''
import lzma
UpperCAmelCase_ : Union[str, Any] = tmp_path_factory.mktemp('data' ) / 'file.txt.xz'
UpperCAmelCase_ : Any = bytes(__snake_case , 'utf-8' )
with lzma.open(__snake_case , 'wb' ) as f:
f.write(__snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Optional[int] , __snake_case : Optional[Any] ):
'''simple docstring'''
import zipfile
UpperCAmelCase_ : int = tmp_path_factory.mktemp('data' ) / 'file.txt.zip'
with zipfile.ZipFile(__snake_case , 'w' ) as f:
f.write(__snake_case , arcname=os.path.basename(__snake_case ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Optional[Any] ):
'''simple docstring'''
if datasets.config.ZSTANDARD_AVAILABLE:
import zstandard as zstd
UpperCAmelCase_ : Tuple = tmp_path_factory.mktemp('data' ) / 'file.txt.zst'
UpperCAmelCase_ : List[str] = bytes(__snake_case , 'utf-8' )
with zstd.open(__snake_case , 'wb' ) as f:
f.write(__snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Any ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = tmp_path_factory.mktemp('data' ) / 'file.xml'
UpperCAmelCase_ : List[Any] = textwrap.dedent(
'\\n <?xml version="1.0" encoding="UTF-8" ?>\n <tmx version="1.4">\n <header segtype="sentence" srclang="ca" />\n <body>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 1</seg></tuv>\n <tuv xml:lang="en"><seg>Content 1</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 2</seg></tuv>\n <tuv xml:lang="en"><seg>Content 2</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 3</seg></tuv>\n <tuv xml:lang="en"><seg>Content 3</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 4</seg></tuv>\n <tuv xml:lang="en"><seg>Content 4</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 5</seg></tuv>\n <tuv xml:lang="en"><seg>Content 5</seg></tuv>\n </tu>\n </body>\n </tmx>' )
with open(__snake_case , 'w' ) as f:
f.write(__snake_case )
return filename
__UpperCAmelCase = [
{'col_1': '0', 'col_2': 0, 'col_3': 0.0},
{'col_1': '1', 'col_2': 1, 'col_3': 1.0},
{'col_1': '2', 'col_2': 2, 'col_3': 2.0},
{'col_1': '3', 'col_2': 3, 'col_3': 3.0},
]
__UpperCAmelCase = [
{'col_1': '4', 'col_2': 4, 'col_3': 4.0},
{'col_1': '5', 'col_2': 5, 'col_3': 5.0},
]
__UpperCAmelCase = {
'col_1': ['0', '1', '2', '3'],
'col_2': [0, 1, 2, 3],
'col_3': [0.0, 1.0, 2.0, 3.0],
}
__UpperCAmelCase = [
{'col_3': 0.0, 'col_1': '0', 'col_2': 0},
{'col_3': 1.0, 'col_1': '1', 'col_2': 1},
]
__UpperCAmelCase = [
{'col_1': 's0', 'col_2': 0, 'col_3': 0.0},
{'col_1': 's1', 'col_2': 1, 'col_3': 1.0},
{'col_1': 's2', 'col_2': 2, 'col_3': 2.0},
{'col_1': 's3', 'col_2': 3, 'col_3': 3.0},
]
@pytest.fixture(scope='session' )
def lowercase__ ( ):
'''simple docstring'''
return DATA_DICT_OF_LISTS
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : int ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = datasets.Dataset.from_dict(__snake_case )
UpperCAmelCase_ : Optional[Any] = str(tmp_path_factory.mktemp('data' ) / 'dataset.arrow' )
dataset.map(cache_file_name=__snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : List[Any] ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = str(tmp_path_factory.mktemp('data' ) / 'dataset.sqlite' )
    with contextlib.closing(sqlite3.connect(__snake_case ) ) as con:
UpperCAmelCase_ : List[Any] = con.cursor()
cur.execute('CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)' )
for item in DATA:
cur.execute('INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)' , tuple(item.values() ) )
con.commit()
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = str(tmp_path_factory.mktemp('data' ) / 'dataset.csv' )
with open(__snake_case , 'w' , newline='' ) as f:
UpperCAmelCase_ : Tuple = csv.DictWriter(__snake_case , fieldnames=['col_1', 'col_2', 'col_3'] )
writer.writeheader()
for item in DATA:
writer.writerow(__snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = str(tmp_path_factory.mktemp('data' ) / 'dataset2.csv' )
with open(__snake_case , 'w' , newline='' ) as f:
UpperCAmelCase_ : Optional[Any] = csv.DictWriter(__snake_case , fieldnames=['col_1', 'col_2', 'col_3'] )
writer.writeheader()
for item in DATA:
writer.writerow(__snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : str , __snake_case : Any ):
'''simple docstring'''
    import bz2
    UpperCAmelCase_ : int = tmp_path_factory.mktemp('data' ) / 'dataset.csv.bz2'
    with open(__snake_case , 'rb' ) as f:
        UpperCAmelCase_ : int = f.read()
    # data = bytes(FILE_CONTENT, "utf-8")
    with bz2.open(__snake_case , 'wb' ) as f:
f.write(__snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : List[str] , __snake_case : Tuple , __snake_case : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = tmp_path_factory.mktemp('data' ) / 'dataset.csv.zip'
with zipfile.ZipFile(__snake_case , 'w' ) as f:
f.write(__snake_case , arcname=os.path.basename(__snake_case ) )
f.write(__snake_case , arcname=os.path.basename(__snake_case ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : str , __snake_case : Optional[int] , __snake_case : Tuple ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = tmp_path_factory.mktemp('data' ) / 'dataset.csv.zip'
with zipfile.ZipFile(__snake_case , 'w' ) as f:
f.write(__snake_case , arcname=os.path.basename(csv_path.replace('.csv' , '.CSV' ) ) )
f.write(__snake_case , arcname=os.path.basename(csva_path.replace('.csv' , '.CSV' ) ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Tuple , __snake_case : int , __snake_case : str ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.csv.zip'
with zipfile.ZipFile(__snake_case , 'w' ) as f:
f.write(__snake_case , arcname=os.path.join('main_dir' , os.path.basename(__snake_case ) ) )
f.write(__snake_case , arcname=os.path.join('main_dir' , os.path.basename(__snake_case ) ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase_ : int = str(tmp_path_factory.mktemp('data' ) / 'dataset.parquet' )
UpperCAmelCase_ : Dict = pa.schema(
{
'col_1': pa.string(),
            'col_2': pa.int64(),
            'col_3': pa.float64(),
} )
with open(__snake_case , 'wb' ) as f:
UpperCAmelCase_ : List[Any] = pq.ParquetWriter(__snake_case , schema=__snake_case )
UpperCAmelCase_ : Any = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(__snake_case ) )] for k in DATA[0]} , schema=__snake_case )
writer.write_table(__snake_case )
writer.close()
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = str(tmp_path_factory.mktemp('data' ) / 'dataset.json' )
UpperCAmelCase_ : Optional[int] = {'data': DATA}
with open(__snake_case , 'w' ) as f:
json.dump(__snake_case , __snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : List[Any] ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = str(tmp_path_factory.mktemp('data' ) / 'dataset.json' )
UpperCAmelCase_ : Tuple = {'data': DATA_DICT_OF_LISTS}
with open(__snake_case , 'w' ) as f:
json.dump(__snake_case , __snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase_ : Dict = str(tmp_path_factory.mktemp('data' ) / 'dataset.jsonl' )
with open(__snake_case , 'w' ) as f:
for item in DATA:
f.write(json.dumps(__snake_case ) + '\n' )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : str ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = str(tmp_path_factory.mktemp('data' ) / 'dataset2.jsonl' )
with open(__snake_case , 'w' ) as f:
for item in DATA:
f.write(json.dumps(__snake_case ) + '\n' )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : int ):
'''simple docstring'''
UpperCAmelCase_ : int = str(tmp_path_factory.mktemp('data' ) / 'dataset_312.jsonl' )
with open(__snake_case , 'w' ) as f:
for item in DATA_312:
f.write(json.dumps(__snake_case ) + '\n' )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : str ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = str(tmp_path_factory.mktemp('data' ) / 'dataset-str.jsonl' )
with open(__snake_case , 'w' ) as f:
for item in DATA_STR:
f.write(json.dumps(__snake_case ) + '\n' )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Dict , __snake_case : Dict ):
'''simple docstring'''
import gzip
UpperCAmelCase_ : Union[str, Any] = str(tmp_path_factory.mktemp('data' ) / 'dataset.txt.gz' )
with open(__snake_case , 'rb' ) as orig_file:
with gzip.open(__snake_case , 'wb' ) as zipped_file:
zipped_file.writelines(__snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : int , __snake_case : Any ):
'''simple docstring'''
import gzip
UpperCAmelCase_ : Dict = str(tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.gz' )
with open(__snake_case , 'rb' ) as orig_file:
with gzip.open(__snake_case , 'wb' ) as zipped_file:
zipped_file.writelines(__snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Optional[Any] , __snake_case : Dict , __snake_case : Optional[int] ):
'''simple docstring'''
UpperCAmelCase_ : int = tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.zip'
with zipfile.ZipFile(__snake_case , 'w' ) as f:
f.write(__snake_case , arcname=os.path.basename(__snake_case ) )
f.write(__snake_case , arcname=os.path.basename(__snake_case ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Optional[Any] , __snake_case : str , __snake_case : Dict , __snake_case : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase_ : str = tmp_path_factory.mktemp('data' ) / 'dataset_nested.jsonl.zip'
with zipfile.ZipFile(__snake_case , 'w' ) as f:
f.write(__snake_case , arcname=os.path.join('nested' , os.path.basename(__snake_case ) ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Dict , __snake_case : Union[str, Any] , __snake_case : Tuple ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.jsonl.zip'
with zipfile.ZipFile(__snake_case , 'w' ) as f:
f.write(__snake_case , arcname=os.path.join('main_dir' , os.path.basename(__snake_case ) ) )
f.write(__snake_case , arcname=os.path.join('main_dir' , os.path.basename(__snake_case ) ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Tuple , __snake_case : str , __snake_case : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.tar'
with tarfile.TarFile(__snake_case , 'w' ) as f:
f.add(__snake_case , arcname=os.path.basename(__snake_case ) )
f.add(__snake_case , arcname=os.path.basename(__snake_case ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : str , __snake_case : Any , __snake_case : Any , __snake_case : List[Any] ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = tmp_path_factory.mktemp('data' ) / 'dataset_nested.jsonl.tar'
with tarfile.TarFile(__snake_case , 'w' ) as f:
f.add(__snake_case , arcname=os.path.join('nested' , os.path.basename(__snake_case ) ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : List[Any] ):
'''simple docstring'''
UpperCAmelCase_ : Any = ['0', '1', '2', '3']
UpperCAmelCase_ : Dict = str(tmp_path_factory.mktemp('data' ) / 'dataset.txt' )
with open(__snake_case , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : List[Any] ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = ['0', '1', '2', '3']
UpperCAmelCase_ : Optional[int] = str(tmp_path_factory.mktemp('data' ) / 'dataset2.txt' )
with open(__snake_case , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Tuple ):
'''simple docstring'''
UpperCAmelCase_ : Dict = ['0', '1', '2', '3']
UpperCAmelCase_ : List[str] = tmp_path_factory.mktemp('data' ) / 'dataset.abc'
with open(__snake_case , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Any , __snake_case : Union[str, Any] , __snake_case : List[Any] ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = tmp_path_factory.mktemp('data' ) / 'dataset.text.zip'
with zipfile.ZipFile(__snake_case , 'w' ) as f:
f.write(__snake_case , arcname=os.path.basename(__snake_case ) )
f.write(__snake_case , arcname=os.path.basename(__snake_case ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Dict , __snake_case : str , __snake_case : Any ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.text.zip'
with zipfile.ZipFile(__snake_case , 'w' ) as f:
f.write(__snake_case , arcname=os.path.join('main_dir' , os.path.basename(__snake_case ) ) )
f.write(__snake_case , arcname=os.path.join('main_dir' , os.path.basename(__snake_case ) ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Union[str, Any] , __snake_case : str , __snake_case : Optional[int] ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = tmp_path_factory.mktemp('data' ) / 'dataset.ext.zip'
with zipfile.ZipFile(__snake_case , 'w' ) as f:
f.write(__snake_case , arcname=os.path.basename('unsupported.ext' ) )
f.write(__snake_case , arcname=os.path.basename('unsupported_2.ext' ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Dict ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = '\n'.join(['First', 'Second\u2029with Unicode new line', 'Third'] )
UpperCAmelCase_ : Dict = str(tmp_path_factory.mktemp('data' ) / 'dataset_with_unicode_new_lines.txt' )
with open(__snake_case , 'w' , encoding='utf-8' ) as f:
f.write(__snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( ):
'''simple docstring'''
return os.path.join('tests' , 'features' , 'data' , 'test_image_rgb.jpg' )
@pytest.fixture(scope='session' )
def lowercase__ ( ):
'''simple docstring'''
return os.path.join('tests' , 'features' , 'data' , 'test_audio_44100.wav' )
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : str , __snake_case : List[str] ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = tmp_path_factory.mktemp('data' ) / 'dataset.img.zip'
with zipfile.ZipFile(__snake_case , 'w' ) as f:
f.write(__snake_case , arcname=os.path.basename(__snake_case ) )
f.write(__snake_case , arcname=os.path.basename(__snake_case ).replace('.jpg' , '2.jpg' ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Any ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = tmp_path_factory.mktemp('data_dir' )
(data_dir / "subdir").mkdir()
with open(data_dir / 'subdir' / 'train.txt' , 'w' ) as f:
f.write('foo\n' * 10 )
with open(data_dir / 'subdir' / 'test.txt' , 'w' ) as f:
f.write('bar\n' * 10 )
# hidden file
with open(data_dir / 'subdir' / '.test.txt' , 'w' ) as f:
f.write('bar\n' * 10 )
# hidden directory
(data_dir / ".subdir").mkdir()
with open(data_dir / '.subdir' / 'train.txt' , 'w' ) as f:
f.write('foo\n' * 10 )
with open(data_dir / '.subdir' / 'test.txt' , 'w' ) as f:
f.write('bar\n' * 10 )
return data_dir
| 29 | 1 |
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowerCamelCase :
'''simple docstring'''
def __init__( self , _UpperCamelCase , _UpperCamelCase=1_3 , _UpperCamelCase=3_2 , _UpperCamelCase=3 , _UpperCamelCase=4 , _UpperCamelCase=[1_0, 2_0, 3_0, 4_0] , _UpperCamelCase=[2, 2, 3, 2] , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase=3_7 , _UpperCamelCase="gelu" , _UpperCamelCase=1_0 , _UpperCamelCase=0.02 , _UpperCamelCase=["stage2", "stage3", "stage4"] , _UpperCamelCase=3 , _UpperCamelCase=None , ) -> Tuple:
UpperCAmelCase_ : Optional[int] = parent
UpperCAmelCase_ : Any = batch_size
UpperCAmelCase_ : Union[str, Any] = image_size
UpperCAmelCase_ : Any = num_channels
UpperCAmelCase_ : Union[str, Any] = num_stages
UpperCAmelCase_ : Dict = hidden_sizes
UpperCAmelCase_ : Union[str, Any] = depths
UpperCAmelCase_ : Dict = is_training
UpperCAmelCase_ : List[Any] = use_labels
UpperCAmelCase_ : int = intermediate_size
UpperCAmelCase_ : List[Any] = hidden_act
UpperCAmelCase_ : List[Any] = type_sequence_label_size
UpperCAmelCase_ : Any = initializer_range
UpperCAmelCase_ : List[Any] = out_features
UpperCAmelCase_ : str = num_labels
UpperCAmelCase_ : Tuple = scope
UpperCAmelCase_ : List[Any] = num_stages
def __UpperCAmelCase ( self ) -> Optional[Any]:
UpperCAmelCase_ : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase_ : List[str] = None
if self.use_labels:
UpperCAmelCase_ : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase_ : Dict = self.get_config()
return config, pixel_values, labels
def __UpperCAmelCase ( self ) -> Tuple:
return ConvNextConfig(
num_channels=self.num_channels , num_stages=self.num_stages , hidden_sizes=self.hidden_sizes , depths=self.depths , is_training=self.is_training , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , out_features=self.out_features , )
def __UpperCAmelCase ( self ) -> Optional[Any]:
return UperNetConfig(
backbone_config=self.get_backbone_config() , hidden_size=5_1_2 , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=_UpperCamelCase , auxiliary_loss_weight=0.4 , auxiliary_in_channels=4_0 , auxiliary_channels=2_5_6 , auxiliary_num_convs=1 , auxiliary_concat_input=_UpperCamelCase , loss_ignore_index=2_5_5 , num_labels=self.num_labels , )
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Any:
UpperCAmelCase_ : Optional[int] = UperNetForSemanticSegmentation(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
UpperCAmelCase_ : int = model(_UpperCamelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def __UpperCAmelCase ( self ) -> Optional[int]:
UpperCAmelCase_ : Tuple = self.prepare_config_and_inputs()
        UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[str] = config_and_inputs
UpperCAmelCase_ : Optional[Any] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class lowerCamelCase (_snake_case , _snake_case , unittest.TestCase ):
'''simple docstring'''
_snake_case : str = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
_snake_case : List[str] = {'''image-segmentation''': UperNetForSemanticSegmentation} if is_torch_available() else {}
_snake_case : Tuple = False
_snake_case : Optional[Any] = False
_snake_case : Optional[Any] = False
_snake_case : List[str] = False
_snake_case : Any = False
_snake_case : Dict = False
def __UpperCAmelCase ( self ) -> List[str]:
UpperCAmelCase_ : Optional[int] = UperNetModelTester(self )
UpperCAmelCase_ : int = ConfigTester(self , config_class=_UpperCamelCase , has_text_modality=_UpperCamelCase , hidden_size=3_7 )
def __UpperCAmelCase ( self ) -> Dict:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __UpperCAmelCase ( self ) -> List[Any]:
return
def __UpperCAmelCase ( self ) -> Tuple:
UpperCAmelCase_ , UpperCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ : List[Any] = model_class(_UpperCamelCase )
UpperCAmelCase_ : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_ : str = [*signature.parameters.keys()]
UpperCAmelCase_ : List[str] = ['pixel_values']
self.assertListEqual(arg_names[:1] , _UpperCamelCase )
def __UpperCAmelCase ( self ) -> Union[str, Any]:
UpperCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*_UpperCamelCase )
@unittest.skip(reason='UperNet does not use inputs_embeds' )
def __UpperCAmelCase ( self ) -> int:
pass
@unittest.skip(reason='UperNet does not support input and output embeddings' )
def __UpperCAmelCase ( self ) -> Any:
pass
@unittest.skip(reason='UperNet does not have a base model' )
def __UpperCAmelCase ( self ) -> List[Any]:
pass
@unittest.skip(reason='UperNet does not have a base model' )
def __UpperCAmelCase ( self ) -> Any:
pass
@require_torch_multi_gpu
@unittest.skip(reason='UperNet has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' )
def __UpperCAmelCase ( self ) -> int:
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def __UpperCAmelCase ( self ) -> str:
pass
def __UpperCAmelCase ( self ) -> Optional[int]:
def check_hidden_states_output(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
UpperCAmelCase_ : Tuple = model_class(_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
with torch.no_grad():
UpperCAmelCase_ : int = model(**self._prepare_for_class(_UpperCamelCase , _UpperCamelCase ) )
UpperCAmelCase_ : Optional[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
UpperCAmelCase_ : Tuple = self.model_tester.num_stages
self.assertEqual(len(_UpperCamelCase ) , expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
UpperCAmelCase_ , UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ : Tuple = True
check_hidden_states_output(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase_ : int = True
check_hidden_states_output(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
def __UpperCAmelCase ( self ) -> Dict:
UpperCAmelCase_ , UpperCAmelCase_ : str = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ : int = _config_zero_init(_UpperCamelCase )
UpperCAmelCase_ : List[str] = _config_zero_init(configs_no_init.backbone_config )
for model_class in self.all_model_classes:
UpperCAmelCase_ : List[str] = model_class(config=_UpperCamelCase )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f"Parameter {name} of model {model_class} seems not properly initialized" , )
@unittest.skip(reason='UperNet does not have tied weights' )
def __UpperCAmelCase ( self ) -> Optional[int]:
pass
@slow
def __UpperCAmelCase ( self ) -> List[Any]:
for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ : Tuple = UperNetForSemanticSegmentation.from_pretrained(_UpperCamelCase )
self.assertIsNotNone(_UpperCamelCase )
def lowercase__ ( ):
'''simple docstring'''
UpperCAmelCase_ : str = hf_hub_download(
repo_id='hf-internal-testing/fixtures_ade20k' , repo_type='dataset' , filename='ADE_val_00000001.jpg' )
UpperCAmelCase_ : Optional[Any] = Image.open(__snake_case ).convert('RGB' )
return image
@require_torch
@require_vision
@slow
class lowerCamelCase (unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self ) -> Tuple:
UpperCAmelCase_ : int = AutoImageProcessor.from_pretrained('openmmlab/upernet-swin-tiny' )
UpperCAmelCase_ : Optional[int] = UperNetForSemanticSegmentation.from_pretrained('openmmlab/upernet-swin-tiny' ).to(_UpperCamelCase )
UpperCAmelCase_ : Union[str, Any] = prepare_img()
UpperCAmelCase_ : Optional[Any] = processor(images=_UpperCamelCase , return_tensors='pt' ).to(_UpperCamelCase )
with torch.no_grad():
UpperCAmelCase_ : Optional[int] = model(**_UpperCamelCase )
UpperCAmelCase_ : Any = torch.Size((1, model.config.num_labels, 5_1_2, 5_1_2) )
self.assertEqual(outputs.logits.shape , _UpperCamelCase )
UpperCAmelCase_ : List[str] = torch.tensor(
[[-7.59_58, -7.59_58, -7.43_02], [-7.59_58, -7.59_58, -7.43_02], [-7.47_97, -7.47_97, -7.30_68]] ).to(_UpperCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , _UpperCamelCase , atol=1E-4 ) )
def __UpperCAmelCase ( self ) -> List[Any]:
UpperCAmelCase_ : Optional[int] = AutoImageProcessor.from_pretrained('openmmlab/upernet-convnext-tiny' )
UpperCAmelCase_ : str = UperNetForSemanticSegmentation.from_pretrained('openmmlab/upernet-convnext-tiny' ).to(_UpperCamelCase )
UpperCAmelCase_ : Any = prepare_img()
UpperCAmelCase_ : List[str] = processor(images=_UpperCamelCase , return_tensors='pt' ).to(_UpperCamelCase )
with torch.no_grad():
UpperCAmelCase_ : Optional[Any] = model(**_UpperCamelCase )
UpperCAmelCase_ : int = torch.Size((1, model.config.num_labels, 5_1_2, 5_1_2) )
self.assertEqual(outputs.logits.shape , _UpperCamelCase )
UpperCAmelCase_ : Dict = torch.tensor(
[[-8.81_10, -8.81_10, -8.65_21], [-8.81_10, -8.81_10, -8.65_21], [-8.77_46, -8.77_46, -8.61_30]] ).to(_UpperCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , _UpperCamelCase , atol=1E-4 ) )
| 29 |
from __future__ import annotations
def lowercase__ ( __snake_case : tuple[int, int] , __snake_case : int ):
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = position
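    # The eight L-shaped moves a knight can make from (y, x).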
UpperCAmelCase_ : str = [
(y + 1, x + 2),
(y - 1, x + 2),
(y + 1, x - 2),
(y - 1, x - 2),
(y + 2, x + 1),
(y + 2, x - 1),
(y - 2, x + 1),
(y - 2, x - 1),
]
UpperCAmelCase_ : Optional[Any] = []
for position in positions:
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = position
if 0 <= y_test < n and 0 <= x_test < n:
permissible_positions.append(__snake_case )
return permissible_positions
def lowercase__ ( __snake_case : list[list[int]] ):
'''simple docstring'''
return not any(elem == 0 for row in board for elem in row )
def lowercase__ ( __snake_case : list[list[int]] , __snake_case : tuple[int, int] , __snake_case : int ):
'''simple docstring'''
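    # Depth-first backtracking: place the next move number, recurse, and reset the square on failure.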
if is_complete(__snake_case ):
return True
for position in get_valid_pos(__snake_case , len(__snake_case ) ):
UpperCAmelCase_ , UpperCAmelCase_ : Any = position
if board[y][x] == 0:
UpperCAmelCase_ : Optional[Any] = curr + 1
if open_knight_tour_helper(__snake_case , __snake_case , curr + 1 ):
return True
UpperCAmelCase_ : List[Any] = 0
return False
def lowercase__ ( __snake_case : int ):
'''simple docstring'''
UpperCAmelCase_ : str = [[0 for i in range(__snake_case )] for j in range(__snake_case )]
for i in range(__snake_case ):
for j in range(__snake_case ):
UpperCAmelCase_ : Optional[Any] = 1
if open_knight_tour_helper(__snake_case , (i, j) , 1 ):
return board
UpperCAmelCase_ : List[Any] = 0
    UpperCAmelCase_ : List[str] = F"Open Knight Tour cannot be performed on a board of size {n}"
raise ValueError(__snake_case )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 29 | 1 |
__UpperCAmelCase = {
'Pillow': 'Pillow<10.0.0',
'accelerate': 'accelerate>=0.20.3',
'av': 'av==9.2.0',
'beautifulsoup4': 'beautifulsoup4',
'black': 'black~=23.1',
'codecarbon': 'codecarbon==1.2.0',
'cookiecutter': 'cookiecutter==1.7.3',
'dataclasses': 'dataclasses',
'datasets': 'datasets!=2.5.0',
'decord': 'decord==0.6.0',
'deepspeed': 'deepspeed>=0.9.3',
'diffusers': 'diffusers',
'dill': 'dill<0.3.5',
'evaluate': 'evaluate>=0.2.0',
'fairscale': 'fairscale>0.3',
'faiss-cpu': 'faiss-cpu',
'fastapi': 'fastapi',
'filelock': 'filelock',
'flax': 'flax>=0.4.1,<=0.7.0',
'ftfy': 'ftfy',
'fugashi': 'fugashi>=1.0',
'GitPython': 'GitPython<3.1.19',
'hf-doc-builder': 'hf-doc-builder>=0.3.0',
'huggingface-hub': 'huggingface-hub>=0.14.1,<1.0',
'importlib_metadata': 'importlib_metadata',
'ipadic': 'ipadic>=1.0.0,<2.0',
'isort': 'isort>=5.5.4',
'jax': 'jax>=0.2.8,!=0.3.2,<=0.4.13',
'jaxlib': 'jaxlib>=0.1.65,<=0.4.13',
'jieba': 'jieba',
'kenlm': 'kenlm',
'keras-nlp': 'keras-nlp>=0.3.1',
'librosa': 'librosa',
'nltk': 'nltk',
'natten': 'natten>=0.14.6',
'numpy': 'numpy>=1.17',
'onnxconverter-common': 'onnxconverter-common',
'onnxruntime-tools': 'onnxruntime-tools>=1.4.2',
'onnxruntime': 'onnxruntime>=1.4.0',
'opencv-python': 'opencv-python',
'optuna': 'optuna',
'optax': 'optax>=0.0.8,<=0.1.4',
'packaging': 'packaging>=20.0',
'parameterized': 'parameterized',
'phonemizer': 'phonemizer',
'protobuf': 'protobuf',
'psutil': 'psutil',
'pyyaml': 'pyyaml>=5.1',
'pydantic': 'pydantic<2',
'pytest': 'pytest>=7.2.0',
'pytest-timeout': 'pytest-timeout',
'pytest-xdist': 'pytest-xdist',
'python': 'python>=3.8.0',
'ray[tune]': 'ray[tune]',
'regex': 'regex!=2019.12.17',
'requests': 'requests',
'rhoknp': 'rhoknp>=1.1.0,<1.3.1',
'rjieba': 'rjieba',
'rouge-score': 'rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1',
'ruff': 'ruff>=0.0.241,<=0.0.259',
'sacrebleu': 'sacrebleu>=1.4.12,<2.0.0',
'sacremoses': 'sacremoses',
'safetensors': 'safetensors>=0.3.1',
'sagemaker': 'sagemaker>=2.31.0',
'scikit-learn': 'scikit-learn',
'sentencepiece': 'sentencepiece>=0.1.91,!=0.1.92',
'sigopt': 'sigopt',
'starlette': 'starlette',
'sudachipy': 'sudachipy>=0.6.6',
'sudachidict_core': 'sudachidict_core>=20220729',
'tensorflow-cpu': 'tensorflow-cpu>=2.6,<2.14',
'tensorflow': 'tensorflow>=2.6,<2.14',
'tensorflow-text': 'tensorflow-text<2.14',
'tf2onnx': 'tf2onnx',
'timeout-decorator': 'timeout-decorator',
'timm': 'timm',
'tokenizers': 'tokenizers>=0.11.1,!=0.11.3,<0.14',
'torch': 'torch>=1.9,!=1.12.0',
'torchaudio': 'torchaudio',
'torchvision': 'torchvision',
'pyctcdecode': 'pyctcdecode>=0.4.0',
'tqdm': 'tqdm>=4.27',
'unidic': 'unidic>=1.0.2',
'unidic_lite': 'unidic_lite>=1.0.7',
'urllib3': 'urllib3<2.0.0',
'uvicorn': 'uvicorn',
}
| 29 |
def lowercase__ ( __snake_case : int ):
'''simple docstring'''
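    # Dynamic programming over integer partitions: memo[n][k] appears to count the partitions
    # of n into parts of size at most k + 1, so the result for m is memo[m][m - 1].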
UpperCAmelCase_ : list[list[int]] = [[0 for _ in range(__snake_case )] for _ in range(m + 1 )]
for i in range(m + 1 ):
UpperCAmelCase_ : Optional[Any] = 1
for n in range(m + 1 ):
for k in range(1 , __snake_case ):
memo[n][k] += memo[n][k - 1]
if n - k > 0:
memo[n][k] += memo[n - k - 1][k]
return memo[m][m - 1]
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
try:
__UpperCAmelCase = int(input('Enter a number: ').strip())
print(partition(n))
except ValueError:
print('Please enter a number.')
else:
try:
__UpperCAmelCase = int(sys.argv[1])
print(partition(n))
except ValueError:
print('Please pass a number.')
| 29 | 1 |
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
__UpperCAmelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
__UpperCAmelCase = '\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline\n >>> from diffusers.utils import load_image\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior.to("cuda")\n\n >>> prompt = "A red cartoon frog, 4k"\n >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)\n\n >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16\n ... )\n >>> pipe.to("cuda")\n\n >>> init_image = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/frog.png"\n ... )\n\n >>> image = pipe(\n ... image=init_image,\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... strength=0.2,\n ... ).images\n\n >>> image[0].save("red_frog.png")\n ```\n'
def lowercase__ ( __snake_case : List[str] , __snake_case : int , __snake_case : Tuple=8 ):
'''simple docstring'''
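    # Divides each dimension by scale_factor**2, rounding up, then rescales by scale_factor,
    # presumably to produce sizes compatible with the latent grid.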
UpperCAmelCase_ : Dict = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
UpperCAmelCase_ : List[Any] = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
def lowercase__ ( __snake_case : Any , __snake_case : int=512 , __snake_case : Dict=512 ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = pil_image.resize((w, h) , resample=Image.BICUBIC , reducing_gap=1 )
UpperCAmelCase_ : Dict = np.array(pil_image.convert('RGB' ) )
    UpperCAmelCase_ : Any = arr.astype(np.float32 ) / 127.5 - 1
UpperCAmelCase_ : Dict = np.transpose(__snake_case , [2, 0, 1] )
UpperCAmelCase_ : List[str] = torch.from_numpy(__snake_case ).unsqueeze(0 )
return image
class lowerCamelCase (_snake_case ):
'''simple docstring'''
def __init__( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , ) -> Union[str, Any]:
super().__init__()
self.register_modules(
unet=_UpperCamelCase , scheduler=_UpperCamelCase , movq=_UpperCamelCase , )
UpperCAmelCase_ : Tuple = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Dict:
# get the original timestep using init_timestep
UpperCAmelCase_ : Any = min(int(num_inference_steps * strength ) , _UpperCamelCase )
UpperCAmelCase_ : List[Any] = max(num_inference_steps - init_timestep , 0 )
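        # Drop the earliest timesteps: a higher strength keeps more of the denoising schedule.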
UpperCAmelCase_ : str = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=None ) -> Tuple:
if not isinstance(_UpperCamelCase , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(_UpperCamelCase )}" )
UpperCAmelCase_ : List[str] = image.to(device=_UpperCamelCase , dtype=_UpperCamelCase )
UpperCAmelCase_ : List[str] = batch_size * num_images_per_prompt
if image.shape[1] == 4:
UpperCAmelCase_ : List[str] = image
else:
if isinstance(_UpperCamelCase , _UpperCamelCase ) and len(_UpperCamelCase ) != batch_size:
raise ValueError(
f"You have passed a list of generators of length {len(_UpperCamelCase )}, but requested an effective batch"
f" size of {batch_size}. Make sure the batch size matches the length of the generators." )
elif isinstance(_UpperCamelCase , _UpperCamelCase ):
UpperCAmelCase_ : Any = [
self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(_UpperCamelCase )
]
UpperCAmelCase_ : Tuple = torch.cat(_UpperCamelCase , dim=0 )
else:
UpperCAmelCase_ : Union[str, Any] = self.movq.encode(_UpperCamelCase ).latent_dist.sample(_UpperCamelCase )
UpperCAmelCase_ : int = self.movq.config.scaling_factor * init_latents
UpperCAmelCase_ : Optional[int] = torch.cat([init_latents] , dim=0 )
UpperCAmelCase_ : Tuple = init_latents.shape
UpperCAmelCase_ : List[Any] = randn_tensor(_UpperCamelCase , generator=_UpperCamelCase , device=_UpperCamelCase , dtype=_UpperCamelCase )
# get latents
UpperCAmelCase_ : str = self.scheduler.add_noise(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
UpperCAmelCase_ : Union[str, Any] = init_latents
return latents
def __UpperCAmelCase ( self , _UpperCamelCase=0 ) -> Any:
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`' )
UpperCAmelCase_ : Optional[Any] = torch.device(f"cuda:{gpu_id}" )
UpperCAmelCase_ : Optional[Any] = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(_UpperCamelCase , _UpperCamelCase )
def __UpperCAmelCase ( self , _UpperCamelCase=0 ) -> Union[str, Any]:
if is_accelerate_available() and is_accelerate_version('>=' , '0.17.0.dev0' ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.' )
UpperCAmelCase_ : str = torch.device(f"cuda:{gpu_id}" )
if self.device.type != "cpu":
self.to('cpu' , silence_dtype_warnings=_UpperCamelCase )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
UpperCAmelCase_ : Dict = None
for cpu_offloaded_model in [self.unet, self.movq]:
UpperCAmelCase_ , UpperCAmelCase_ : Dict = cpu_offload_with_hook(_UpperCamelCase , _UpperCamelCase , prev_module_hook=_UpperCamelCase )
# We'll offload the last model manually.
UpperCAmelCase_ : Any = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def __UpperCAmelCase ( self ) -> Dict:
if not hasattr(self.unet , '_hf_hook' ):
return self.device
for module in self.unet.modules():
if (
hasattr(_UpperCamelCase , '_hf_hook' )
and hasattr(module._hf_hook , 'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
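# Sampling entry point: encode the input image to movq latents, noise them according to `strength`, then run the guided denoising loop and decode the result.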
@torch.no_grad()
@replace_example_docstring(_UpperCamelCase )
def __call__( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = 5_1_2 , _UpperCamelCase = 5_1_2 , _UpperCamelCase = 1_0_0 , _UpperCamelCase = 4.0 , _UpperCamelCase = 0.3 , _UpperCamelCase = 1 , _UpperCamelCase = None , _UpperCamelCase = "pil" , _UpperCamelCase = True , ) -> str:
UpperCAmelCase_ : Any = self._execution_device
UpperCAmelCase_ : Union[str, Any] = guidance_scale > 1.0
if isinstance(_UpperCamelCase , _UpperCamelCase ):
UpperCAmelCase_ : str = torch.cat(_UpperCamelCase , dim=0 )
UpperCAmelCase_ : Optional[Any] = image_embeds.shape[0]
if isinstance(_UpperCamelCase , _UpperCamelCase ):
UpperCAmelCase_ : Union[str, Any] = torch.cat(_UpperCamelCase , dim=0 )
if do_classifier_free_guidance:
UpperCAmelCase_ : int = image_embeds.repeat_interleave(_UpperCamelCase , dim=0 )
UpperCAmelCase_ : int = negative_image_embeds.repeat_interleave(_UpperCamelCase , dim=0 )
UpperCAmelCase_ : Optional[Any] = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=_UpperCamelCase )
if not isinstance(_UpperCamelCase , _UpperCamelCase ):
UpperCAmelCase_ : Tuple = [image]
if not all(isinstance(_UpperCamelCase , (PIL.Image.Image, torch.Tensor) ) for i in image ):
raise ValueError(
f"Input is in incorrect format: {[type(_UpperCamelCase ) for i in image]}. Currently, we only support PIL image and pytorch tensor" )
UpperCAmelCase_ : str = torch.cat([prepare_image(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) for i in image] , dim=0 )
UpperCAmelCase_ : Any = image.to(dtype=image_embeds.dtype , device=_UpperCamelCase )
UpperCAmelCase_ : List[str] = self.movq.encode(_UpperCamelCase )['latents']
UpperCAmelCase_ : List[Any] = latents.repeat_interleave(_UpperCamelCase , dim=0 )
self.scheduler.set_timesteps(_UpperCamelCase , device=_UpperCamelCase )
UpperCAmelCase_ , UpperCAmelCase_ : Any = self.get_timesteps(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
UpperCAmelCase_ : Optional[Any] = timesteps[:1].repeat(batch_size * num_images_per_prompt )
UpperCAmelCase_ , UpperCAmelCase_ : str = downscale_height_and_width(_UpperCamelCase , _UpperCamelCase , self.movq_scale_factor )
UpperCAmelCase_ : Dict = self.prepare_latents(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , image_embeds.dtype , _UpperCamelCase , _UpperCamelCase )
for i, t in enumerate(self.progress_bar(_UpperCamelCase ) ):
# expand the latents if we are doing classifier free guidance
UpperCAmelCase_ : Optional[Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
UpperCAmelCase_ : str = {'image_embeds': image_embeds}
UpperCAmelCase_ : Union[str, Any] = self.unet(
sample=_UpperCamelCase , timestep=_UpperCamelCase , encoder_hidden_states=_UpperCamelCase , added_cond_kwargs=_UpperCamelCase , return_dict=_UpperCamelCase , )[0]
if do_classifier_free_guidance:
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = noise_pred.split(latents.shape[1] , dim=1 )
UpperCAmelCase_ , UpperCAmelCase_ : str = noise_pred.chunk(2 )
UpperCAmelCase_ , UpperCAmelCase_ : str = variance_pred.chunk(2 )
UpperCAmelCase_ : Dict = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
UpperCAmelCase_ : Tuple = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , 'variance_type' )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
UpperCAmelCase_ , UpperCAmelCase_ : int = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
UpperCAmelCase_ : List[str] = self.scheduler.step(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , generator=_UpperCamelCase , )[0]
# post-processing
UpperCAmelCase_ : Optional[Any] = self.movq.decode(_UpperCamelCase , force_not_quantize=_UpperCamelCase )['sample']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}" )
if output_type in ["np", "pil"]:
UpperCAmelCase_ : List[str] = image * 0.5 + 0.5
UpperCAmelCase_ : List[Any] = image.clamp(0 , 1 )
UpperCAmelCase_ : Dict = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
UpperCAmelCase_ : List[Any] = self.numpy_to_pil(_UpperCamelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_UpperCamelCase )
| 29 |
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
__UpperCAmelCase = logging.get_logger(__name__)
@add_end_docstrings(_snake_case )
class lowerCamelCase (_snake_case ):
'''simple docstring'''
def __init__( self , *_UpperCamelCase , **_UpperCamelCase ) -> int:
super().__init__(*_UpperCamelCase , **_UpperCamelCase )
self.check_model_type(_UpperCamelCase )
def __UpperCAmelCase ( self , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase=None , **_UpperCamelCase ) -> List[Any]:
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = {}, {}
if padding is not None:
UpperCAmelCase_ : List[str] = padding
if truncation is not None:
UpperCAmelCase_ : Tuple = truncation
if top_k is not None:
UpperCAmelCase_ : Dict = top_k
return preprocess_params, {}, postprocess_params
def __call__( self , _UpperCamelCase , _UpperCamelCase = None , **_UpperCamelCase ) -> int:
if isinstance(_UpperCamelCase , (Image.Image, str) ) and isinstance(_UpperCamelCase , _UpperCamelCase ):
UpperCAmelCase_ : Optional[Any] = {'image': image, 'question': question}
else:
UpperCAmelCase_ : List[str] = image
UpperCAmelCase_ : Optional[Any] = super().__call__(_UpperCamelCase , **_UpperCamelCase )
return results
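# Preprocess: load the image, tokenize the question, and merge in the pixel features from the image processor.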
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase=False , _UpperCamelCase=False ) -> Optional[Any]:
UpperCAmelCase_ : List[Any] = load_image(inputs['image'] )
UpperCAmelCase_ : Dict = self.tokenizer(
inputs['question'] , return_tensors=self.framework , padding=_UpperCamelCase , truncation=_UpperCamelCase )
UpperCAmelCase_ : int = self.image_processor(images=_UpperCamelCase , return_tensors=self.framework )
model_inputs.update(_UpperCamelCase )
return model_inputs
def __UpperCAmelCase ( self , _UpperCamelCase ) -> Optional[int]:
UpperCAmelCase_ : Any = self.model(**_UpperCamelCase )
return model_outputs
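# Postprocess: apply a sigmoid to the logits and map the top_k scores to their answer labels.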
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase=5 ) -> str:
if top_k > self.model.config.num_labels:
UpperCAmelCase_ : Union[str, Any] = self.model.config.num_labels
if self.framework == "pt":
UpperCAmelCase_ : List[str] = model_outputs.logits.sigmoid()[0]
UpperCAmelCase_ , UpperCAmelCase_ : str = probs.topk(_UpperCamelCase )
else:
raise ValueError(f"Unsupported framework: {self.framework}" )
UpperCAmelCase_ : Optional[Any] = scores.tolist()
UpperCAmelCase_ : Tuple = ids.tolist()
return [{"score": score, "answer": self.model.config.idalabel[_id]} for score, _id in zip(_UpperCamelCase , _UpperCamelCase )]
| 29 | 1 |
from math import sqrt
def lowercase__ ( __snake_case : int = 1_000_000 ):
'''simple docstring'''
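# Grow the largest cuboid side and, for each candidate sum of the two shorter sides,
# count the cuboids whose shortest surface path is an integer, stopping once the count exceeds the limit.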
UpperCAmelCase_ : int = 0
UpperCAmelCase_ : int = 0
UpperCAmelCase_ : int
while num_cuboids <= limit:
max_cuboid_size += 1
for sum_shortest_sides in range(2 , 2 * max_cuboid_size + 1 ):
if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer():
num_cuboids += (
min(__snake_case , sum_shortest_sides // 2 )
- max(1 , sum_shortest_sides - max_cuboid_size )
+ 1
)
return max_cuboid_size
if __name__ == "__main__":
print(F'{solution() = }')
| 29 |
import os
# Precomputes a list of the first 100 triangular numbers
__UpperCAmelCase = [int(0.5 * n * (n + 1)) for n in range(1, 101)]
def lowercase__ ( ):
'''simple docstring'''
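# Score each word by summing its letter values (A=1 ... Z=26) and count the words whose score is a triangular number.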
UpperCAmelCase_ : Any = os.path.dirname(os.path.realpath(__snake_case ) )
UpperCAmelCase_ : Optional[Any] = os.path.join(__snake_case , 'words.txt' )
UpperCAmelCase_ : Union[str, Any] = ''
with open(__snake_case ) as f:
UpperCAmelCase_ : List[Any] = f.readline()
UpperCAmelCase_ : Optional[int] = [word.strip('"' ) for word in words.strip('\r\n' ).split(',' )]
UpperCAmelCase_ : Optional[int] = [
word
for word in [sum(ord(__snake_case ) - 64 for x in word ) for word in words]
if word in TRIANGULAR_NUMBERS
]
return len(__snake_case )
if __name__ == "__main__":
print(solution())
| 29 | 1 |
from __future__ import annotations
def lowercase__ ( __snake_case : int , __snake_case : int ):
'''simple docstring'''
UpperCAmelCase_ : list[list[int]] = []
create_all_state(1 , __snake_case , __snake_case , [] , __snake_case )
return result
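# Backtracking helper: append one candidate at a time and recurse until `level` reaches 0, then record a copy of the current combination.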
def lowercase__ ( __snake_case : int , __snake_case : int , __snake_case : int , __snake_case : list[int] , __snake_case : list[list[int]] , ):
'''simple docstring'''
if level == 0:
total_list.append(current_list[:] )
return
for i in range(__snake_case , total_number - level + 2 ):
current_list.append(__snake_case )
create_all_state(i + 1 , __snake_case , level - 1 , __snake_case , __snake_case )
current_list.pop()
def lowercase__ ( __snake_case : list[list[int]] ):
'''simple docstring'''
for i in total_list:
print(*__snake_case )
if __name__ == "__main__":
__UpperCAmelCase = 4
__UpperCAmelCase = 2
__UpperCAmelCase = generate_all_combinations(n, k)
print_all_state(total_list)
| 29 |
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
__UpperCAmelCase = importlib.util.find_spec('s3fs') is not None
if _has_safs:
from .safilesystem import SaFileSystem # noqa: F401
__UpperCAmelCase = [
compression.BzaFileSystem,
compression.GzipFileSystem,
compression.LzaFileSystem,
compression.XzFileSystem,
compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
warnings.warn(F'A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.')
fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def lowercase__ ( __snake_case : str ):
'''simple docstring'''
if "://" in dataset_path:
UpperCAmelCase_ : int = dataset_path.split('://' )[1]
return dataset_path
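# A filesystem counts as remote when its protocol is anything other than the local "file" protocol.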
def lowercase__ ( __snake_case : fsspec.AbstractFileSystem ):
'''simple docstring'''
if fs is not None and fs.protocol != "file":
return True
else:
return False
def lowercase__ ( __snake_case : fsspec.AbstractFileSystem , __snake_case : str , __snake_case : str ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = not is_remote_filesystem(__snake_case )
if is_local:
# LocalFileSystem.mv does copy + rm; it is more efficient to simply move a local directory
shutil.move(fs._strip_protocol(__snake_case ) , fs._strip_protocol(__snake_case ) )
else:
fs.mv(__snake_case , __snake_case , recursive=__snake_case )
def lowercase__ ( ):
'''simple docstring'''
if hasattr(fsspec.asyn , 'reset_lock' ):
# for future fsspec>2022.05.0
fsspec.asyn.reset_lock()
else:
UpperCAmelCase_ : Optional[Any] = None
UpperCAmelCase_ : Union[str, Any] = None
UpperCAmelCase_ : int = threading.Lock()
| 29 | 1 |
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
__UpperCAmelCase = logging.get_logger(__name__)
@add_end_docstrings(_snake_case )
class lowerCamelCase (_snake_case ):
'''simple docstring'''
def __init__( self , *_UpperCamelCase , **_UpperCamelCase ) -> int:
super().__init__(*_UpperCamelCase , **_UpperCamelCase )
self.check_model_type(_UpperCamelCase )
def __UpperCAmelCase ( self , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase=None , **_UpperCamelCase ) -> List[Any]:
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = {}, {}
if padding is not None:
UpperCAmelCase_ : List[str] = padding
if truncation is not None:
UpperCAmelCase_ : Tuple = truncation
if top_k is not None:
UpperCAmelCase_ : Dict = top_k
return preprocess_params, {}, postprocess_params
def __call__( self , _UpperCamelCase , _UpperCamelCase = None , **_UpperCamelCase ) -> int:
if isinstance(_UpperCamelCase , (Image.Image, str) ) and isinstance(_UpperCamelCase , _UpperCamelCase ):
UpperCAmelCase_ : Optional[Any] = {'image': image, 'question': question}
else:
UpperCAmelCase_ : List[str] = image
UpperCAmelCase_ : Optional[Any] = super().__call__(_UpperCamelCase , **_UpperCamelCase )
return results
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase=False , _UpperCamelCase=False ) -> Optional[Any]:
UpperCAmelCase_ : List[Any] = load_image(inputs['image'] )
UpperCAmelCase_ : Dict = self.tokenizer(
inputs['question'] , return_tensors=self.framework , padding=_UpperCamelCase , truncation=_UpperCamelCase )
UpperCAmelCase_ : int = self.image_processor(images=_UpperCamelCase , return_tensors=self.framework )
model_inputs.update(_UpperCamelCase )
return model_inputs
def __UpperCAmelCase ( self , _UpperCamelCase ) -> Optional[int]:
UpperCAmelCase_ : Any = self.model(**_UpperCamelCase )
return model_outputs
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase=5 ) -> str:
if top_k > self.model.config.num_labels:
UpperCAmelCase_ : Union[str, Any] = self.model.config.num_labels
if self.framework == "pt":
UpperCAmelCase_ : List[str] = model_outputs.logits.sigmoid()[0]
UpperCAmelCase_ , UpperCAmelCase_ : str = probs.topk(_UpperCamelCase )
else:
raise ValueError(f"Unsupported framework: {self.framework}" )
UpperCAmelCase_ : Optional[Any] = scores.tolist()
UpperCAmelCase_ : Tuple = ids.tolist()
return [{"score": score, "answer": self.model.config.idalabel[_id]} for score, _id in zip(_UpperCamelCase , _UpperCamelCase )]
| 29 |
def lowercase__ ( __snake_case : list ):
'''simple docstring'''
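# Cocktail shaker sort: alternate a backward and a forward bubble pass, stopping early once a full cycle makes no swaps.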
for i in range(len(__snake_case ) - 1 , 0 , -1 ):
UpperCAmelCase_ : Dict = False
for j in range(__snake_case , 0 , -1 ):
if unsorted[j] < unsorted[j - 1]:
UpperCAmelCase_ , UpperCAmelCase_ : Any = unsorted[j - 1], unsorted[j]
UpperCAmelCase_ : int = True
for j in range(__snake_case ):
if unsorted[j] > unsorted[j + 1]:
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = unsorted[j + 1], unsorted[j]
UpperCAmelCase_ : Any = True
if not swapped:
break
return unsorted
if __name__ == "__main__":
import doctest
doctest.testmod()
__UpperCAmelCase = input('Enter numbers separated by a comma:\n').strip()
__UpperCAmelCase = [int(item) for item in user_input.split(',')]
print(F'{cocktail_shaker_sort(unsorted) = }')
| 29 | 1 |
def lowercase__ ( __snake_case : str , __snake_case : bool = False ):
'''simple docstring'''
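# Validate both arguments, split the input on underscores, capitalize each selected word, and rejoin
# (the first word stays lowercase unless use_pascal is set).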
if not isinstance(__snake_case , __snake_case ):
UpperCAmelCase_ : Optional[int] = F"Expected string as input, found {type(__snake_case )}"
raise ValueError(__snake_case )
if not isinstance(__snake_case , __snake_case ):
UpperCAmelCase_ : Optional[int] = F"Expected boolean as use_pascal parameter, found {type(__snake_case )}"
raise ValueError(__snake_case )
UpperCAmelCase_ : Optional[Any] = input_str.split('_' )
UpperCAmelCase_ : Any = 0 if use_pascal else 1
UpperCAmelCase_ : Optional[int] = words[start_index:]
UpperCAmelCase_ : int = [word[0].upper() + word[1:] for word in words_to_capitalize]
UpperCAmelCase_ : Optional[Any] = '' if use_pascal else words[0]
return "".join([initial_word, *capitalized_words] )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 29 |
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
__UpperCAmelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
__UpperCAmelCase = '\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline\n >>> from diffusers.utils import load_image\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior.to("cuda")\n\n >>> prompt = "A red cartoon frog, 4k"\n >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)\n\n >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16\n ... )\n >>> pipe.to("cuda")\n\n >>> init_image = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/frog.png"\n ... )\n\n >>> image = pipe(\n ... image=init_image,\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... strength=0.2,\n ... ).images\n\n >>> image[0].save("red_frog.png")\n ```\n'
def lowercase__ ( __snake_case : List[str] , __snake_case : int , __snake_case : Tuple=8 ):
'''simple docstring'''
UpperCAmelCase_ : Dict = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
UpperCAmelCase_ : List[Any] = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
def lowercase__ ( __snake_case : Any , __snake_case : int=512 , __snake_case : Dict=512 ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = pil_image.resize((w, h) , resample=Image.BICUBIC , reducing_gap=1 )
UpperCAmelCase_ : Dict = np.array(pil_image.convert('RGB' ) )
UpperCAmelCase_ : Any = arr.astype(np.floataa ) / 127.5 - 1
UpperCAmelCase_ : Dict = np.transpose(__snake_case , [2, 0, 1] )
UpperCAmelCase_ : List[str] = torch.from_numpy(__snake_case ).unsqueeze(0 )
return image
class lowerCamelCase (_snake_case ):
'''simple docstring'''
def __init__( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , ) -> Union[str, Any]:
super().__init__()
self.register_modules(
unet=_UpperCamelCase , scheduler=_UpperCamelCase , movq=_UpperCamelCase , )
UpperCAmelCase_ : Tuple = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Dict:
# get the original timestep using init_timestep
UpperCAmelCase_ : Any = min(int(num_inference_steps * strength ) , _UpperCamelCase )
UpperCAmelCase_ : List[Any] = max(num_inference_steps - init_timestep , 0 )
UpperCAmelCase_ : str = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=None ) -> Tuple:
if not isinstance(_UpperCamelCase , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(_UpperCamelCase )}" )
UpperCAmelCase_ : List[str] = image.to(device=_UpperCamelCase , dtype=_UpperCamelCase )
UpperCAmelCase_ : List[str] = batch_size * num_images_per_prompt
if image.shape[1] == 4:
UpperCAmelCase_ : List[str] = image
else:
if isinstance(_UpperCamelCase , _UpperCamelCase ) and len(_UpperCamelCase ) != batch_size:
raise ValueError(
f"You have passed a list of generators of length {len(_UpperCamelCase )}, but requested an effective batch"
f" size of {batch_size}. Make sure the batch size matches the length of the generators." )
elif isinstance(_UpperCamelCase , _UpperCamelCase ):
UpperCAmelCase_ : Any = [
self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(_UpperCamelCase )
]
UpperCAmelCase_ : Tuple = torch.cat(_UpperCamelCase , dim=0 )
else:
UpperCAmelCase_ : Union[str, Any] = self.movq.encode(_UpperCamelCase ).latent_dist.sample(_UpperCamelCase )
UpperCAmelCase_ : int = self.movq.config.scaling_factor * init_latents
UpperCAmelCase_ : Optional[int] = torch.cat([init_latents] , dim=0 )
UpperCAmelCase_ : Tuple = init_latents.shape
UpperCAmelCase_ : List[Any] = randn_tensor(_UpperCamelCase , generator=_UpperCamelCase , device=_UpperCamelCase , dtype=_UpperCamelCase )
# get latents
UpperCAmelCase_ : str = self.scheduler.add_noise(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
UpperCAmelCase_ : Union[str, Any] = init_latents
return latents
def __UpperCAmelCase ( self , _UpperCamelCase=0 ) -> Any:
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`' )
UpperCAmelCase_ : Optional[Any] = torch.device(f"cuda:{gpu_id}" )
UpperCAmelCase_ : Optional[Any] = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(_UpperCamelCase , _UpperCamelCase )
def __UpperCAmelCase ( self , _UpperCamelCase=0 ) -> Union[str, Any]:
if is_accelerate_available() and is_accelerate_version('>=' , '0.17.0.dev0' ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.' )
UpperCAmelCase_ : str = torch.device(f"cuda:{gpu_id}" )
if self.device.type != "cpu":
self.to('cpu' , silence_dtype_warnings=_UpperCamelCase )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
UpperCAmelCase_ : Dict = None
for cpu_offloaded_model in [self.unet, self.movq]:
UpperCAmelCase_ , UpperCAmelCase_ : Dict = cpu_offload_with_hook(_UpperCamelCase , _UpperCamelCase , prev_module_hook=_UpperCamelCase )
# We'll offload the last model manually.
UpperCAmelCase_ : Any = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def __UpperCAmelCase ( self ) -> Dict:
if not hasattr(self.unet , '_hf_hook' ):
return self.device
for module in self.unet.modules():
if (
hasattr(_UpperCamelCase , '_hf_hook' )
and hasattr(module._hf_hook , 'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(_UpperCamelCase )
def __call__( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = 5_1_2 , _UpperCamelCase = 5_1_2 , _UpperCamelCase = 1_0_0 , _UpperCamelCase = 4.0 , _UpperCamelCase = 0.3 , _UpperCamelCase = 1 , _UpperCamelCase = None , _UpperCamelCase = "pil" , _UpperCamelCase = True , ) -> str:
UpperCAmelCase_ : Any = self._execution_device
UpperCAmelCase_ : Union[str, Any] = guidance_scale > 1.0
if isinstance(_UpperCamelCase , _UpperCamelCase ):
UpperCAmelCase_ : str = torch.cat(_UpperCamelCase , dim=0 )
UpperCAmelCase_ : Optional[Any] = image_embeds.shape[0]
if isinstance(_UpperCamelCase , _UpperCamelCase ):
UpperCAmelCase_ : Union[str, Any] = torch.cat(_UpperCamelCase , dim=0 )
if do_classifier_free_guidance:
UpperCAmelCase_ : int = image_embeds.repeat_interleave(_UpperCamelCase , dim=0 )
UpperCAmelCase_ : int = negative_image_embeds.repeat_interleave(_UpperCamelCase , dim=0 )
UpperCAmelCase_ : Optional[Any] = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=_UpperCamelCase )
if not isinstance(_UpperCamelCase , _UpperCamelCase ):
UpperCAmelCase_ : Tuple = [image]
if not all(isinstance(_UpperCamelCase , (PIL.Image.Image, torch.Tensor) ) for i in image ):
raise ValueError(
f"Input is in incorrect format: {[type(_UpperCamelCase ) for i in image]}. Currently, we only support PIL image and pytorch tensor" )
UpperCAmelCase_ : str = torch.cat([prepare_image(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) for i in image] , dim=0 )
UpperCAmelCase_ : Any = image.to(dtype=image_embeds.dtype , device=_UpperCamelCase )
UpperCAmelCase_ : List[str] = self.movq.encode(_UpperCamelCase )['latents']
UpperCAmelCase_ : List[Any] = latents.repeat_interleave(_UpperCamelCase , dim=0 )
self.scheduler.set_timesteps(_UpperCamelCase , device=_UpperCamelCase )
UpperCAmelCase_ , UpperCAmelCase_ : Any = self.get_timesteps(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
UpperCAmelCase_ : Optional[Any] = timesteps[:1].repeat(batch_size * num_images_per_prompt )
UpperCAmelCase_ , UpperCAmelCase_ : str = downscale_height_and_width(_UpperCamelCase , _UpperCamelCase , self.movq_scale_factor )
UpperCAmelCase_ : Dict = self.prepare_latents(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , image_embeds.dtype , _UpperCamelCase , _UpperCamelCase )
for i, t in enumerate(self.progress_bar(_UpperCamelCase ) ):
# expand the latents if we are doing classifier free guidance
UpperCAmelCase_ : Optional[Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
UpperCAmelCase_ : str = {'image_embeds': image_embeds}
UpperCAmelCase_ : Union[str, Any] = self.unet(
sample=_UpperCamelCase , timestep=_UpperCamelCase , encoder_hidden_states=_UpperCamelCase , added_cond_kwargs=_UpperCamelCase , return_dict=_UpperCamelCase , )[0]
if do_classifier_free_guidance:
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = noise_pred.split(latents.shape[1] , dim=1 )
UpperCAmelCase_ , UpperCAmelCase_ : str = noise_pred.chunk(2 )
UpperCAmelCase_ , UpperCAmelCase_ : str = variance_pred.chunk(2 )
UpperCAmelCase_ : Dict = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
UpperCAmelCase_ : Tuple = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , 'variance_type' )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
UpperCAmelCase_ , UpperCAmelCase_ : int = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
UpperCAmelCase_ : List[str] = self.scheduler.step(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , generator=_UpperCamelCase , )[0]
# post-processing
UpperCAmelCase_ : Optional[Any] = self.movq.decode(_UpperCamelCase , force_not_quantize=_UpperCamelCase )['sample']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}" )
if output_type in ["np", "pil"]:
UpperCAmelCase_ : List[str] = image * 0.5 + 0.5
UpperCAmelCase_ : List[Any] = image.clamp(0 , 1 )
UpperCAmelCase_ : Dict = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
UpperCAmelCase_ : List[Any] = self.numpy_to_pil(_UpperCamelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_UpperCamelCase )
| 29 | 1 |
def lowercase__ ( __snake_case : int ):
'''simple docstring'''
if n == 1 or not isinstance(__snake_case , __snake_case ):
return 0
elif n == 2:
return 1
else:
UpperCAmelCase_ : Tuple = [0, 1]
for i in range(2 , n + 1 ):
sequence.append(sequence[i - 1] + sequence[i - 2] )
return sequence[n]
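# Step through Fibonacci indices until the number at that index has at least n digits, then return that index.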
def lowercase__ ( __snake_case : int ):
'''simple docstring'''
UpperCAmelCase_ : int = 0
UpperCAmelCase_ : Any = 2
while digits < n:
index += 1
UpperCAmelCase_ : Tuple = len(str(fibonacci(__snake_case ) ) )
return index
def lowercase__ ( __snake_case : int = 1_000 ):
'''simple docstring'''
return fibonacci_digits_index(__snake_case )
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 29 |
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
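# Read a boolean flag from the environment; unset keys fall back to `default`, and non-boolean values raise a ValueError.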
def lowercase__ ( __snake_case : List[Any] , __snake_case : List[str]=False ):
'''simple docstring'''
try:
UpperCAmelCase_ : int = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
UpperCAmelCase_ : Optional[int] = default
else:
# KEY is set, convert it to True or False.
try:
UpperCAmelCase_ : List[Any] = strtobool(__snake_case )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(F"If set, {key} must be yes or no." )
return _value
__UpperCAmelCase = parse_flag_from_env('RUN_SLOW', default=False)
def lowercase__ ( __snake_case : int ):
'''simple docstring'''
return unittest.skip('Test was skipped' )(__snake_case )
def lowercase__ ( __snake_case : Tuple ):
'''simple docstring'''
return unittest.skipUnless(_run_slow_tests , 'test is slow' )(__snake_case )
def lowercase__ ( __snake_case : List[str] ):
'''simple docstring'''
return unittest.skipUnless(not torch.cuda.is_available() , 'test requires only a CPU' )(__snake_case )
def lowercase__ ( __snake_case : Tuple ):
'''simple docstring'''
return unittest.skipUnless(torch.cuda.is_available() , 'test requires a GPU' )(__snake_case )
def lowercase__ ( __snake_case : List[str] ):
'''simple docstring'''
return unittest.skipUnless(is_xpu_available() , 'test requires a XPU' )(__snake_case )
def lowercase__ ( __snake_case : str ):
'''simple docstring'''
return unittest.skipUnless(is_mps_available() , 'test requires a `mps` backend support in `torch`' )(__snake_case )
def lowercase__ ( __snake_case : Tuple ):
'''simple docstring'''
return unittest.skipUnless(
is_transformers_available() and is_datasets_available() , 'test requires the Hugging Face suite' )(__snake_case )
def lowercase__ ( __snake_case : str ):
'''simple docstring'''
return unittest.skipUnless(is_bnb_available() , 'test requires the bitsandbytes library' )(__snake_case )
def lowercase__ ( __snake_case : Dict ):
'''simple docstring'''
return unittest.skipUnless(is_tpu_available() , 'test requires TPU' )(__snake_case )
def lowercase__ ( __snake_case : Tuple ):
'''simple docstring'''
return unittest.skipUnless(torch.cuda.device_count() == 1 , 'test requires a GPU' )(__snake_case )
def lowercase__ ( __snake_case : Dict ):
'''simple docstring'''
return unittest.skipUnless(torch.xpu.device_count() == 1 , 'test requires a XPU' )(__snake_case )
def lowercase__ ( __snake_case : Optional[int] ):
'''simple docstring'''
return unittest.skipUnless(torch.cuda.device_count() > 1 , 'test requires multiple GPUs' )(__snake_case )
def lowercase__ ( __snake_case : int ):
'''simple docstring'''
return unittest.skipUnless(torch.xpu.device_count() > 1 , 'test requires multiple XPUs' )(__snake_case )
def lowercase__ ( __snake_case : Dict ):
'''simple docstring'''
return unittest.skipUnless(is_safetensors_available() , 'test requires safetensors' )(__snake_case )
def lowercase__ ( __snake_case : Tuple ):
'''simple docstring'''
return unittest.skipUnless(is_deepspeed_available() , 'test requires DeepSpeed' )(__snake_case )
def lowercase__ ( __snake_case : List[Any] ):
'''simple docstring'''
return unittest.skipUnless(is_torch_version('>=' , '1.12.0' ) , 'test requires torch version >= 1.12.0' )(__snake_case )
def lowercase__ ( __snake_case : Dict=None , __snake_case : Dict=None ):
'''simple docstring'''
if test_case is None:
return partial(__snake_case , version=__snake_case )
return unittest.skipUnless(is_torch_version('>=' , __snake_case ) , F"test requires torch version >= {version}" )(__snake_case )
def lowercase__ ( __snake_case : str ):
'''simple docstring'''
return unittest.skipUnless(is_tensorboard_available() , 'test requires Tensorboard' )(__snake_case )
def lowercase__ ( __snake_case : List[str] ):
'''simple docstring'''
return unittest.skipUnless(is_wandb_available() , 'test requires wandb' )(__snake_case )
def lowercase__ ( __snake_case : str ):
'''simple docstring'''
return unittest.skipUnless(is_comet_ml_available() , 'test requires comet_ml' )(__snake_case )
__UpperCAmelCase = (
any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)
def lowercase__ ( __snake_case : List[Any] ):
'''simple docstring'''
return unittest.skipUnless(
_atleast_one_tracker_available , 'test requires at least one tracker to be available and for `comet_ml` to not be installed' , )(__snake_case )
class lowerCamelCase (unittest.TestCase ):
'''simple docstring'''
_snake_case : Union[str, Any] = True
@classmethod
def __UpperCAmelCase ( cls ) -> Union[str, Any]:
UpperCAmelCase_ : List[Any] = tempfile.mkdtemp()
@classmethod
def __UpperCAmelCase ( cls ) -> List[str]:
if os.path.exists(cls.tmpdir ):
shutil.rmtree(cls.tmpdir )
def __UpperCAmelCase ( self ) -> str:
if self.clear_on_setup:
for path in Path(self.tmpdir ).glob('**/*' ):
if path.is_file():
path.unlink()
elif path.is_dir():
shutil.rmtree(_UpperCamelCase )
class lowerCamelCase (unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self ) -> Optional[int]:
super().tearDown()
# Reset the state of the AcceleratorState singleton.
AcceleratorState._reset_state()
PartialState._reset_state()
class lowerCamelCase (unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self , _UpperCamelCase ) -> Any:
UpperCAmelCase_ : List[Any] = mocks if isinstance(_UpperCamelCase , (tuple, list) ) else [mocks]
for m in self.mocks:
m.start()
self.addCleanup(m.stop )
def lowercase__ ( __snake_case : int ):
'''simple docstring'''
UpperCAmelCase_ : int = AcceleratorState()
UpperCAmelCase_ : str = tensor[None].clone().to(state.device )
UpperCAmelCase_ : List[str] = gather(__snake_case ).cpu()
UpperCAmelCase_ : List[Any] = tensor[0].cpu()
for i in range(tensors.shape[0] ):
if not torch.equal(tensors[i] , __snake_case ):
return False
return True
class lowerCamelCase :
'''simple docstring'''
def __init__( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Any:
UpperCAmelCase_ : str = returncode
UpperCAmelCase_ : Optional[Any] = stdout
UpperCAmelCase_ : Optional[Any] = stderr
async def lowercase__ ( __snake_case : Optional[Any] , __snake_case : Optional[int] ):
'''simple docstring'''
while True:
UpperCAmelCase_ : Dict = await stream.readline()
if line:
callback(__snake_case )
else:
break
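# Spawn the command asynchronously and tee its stdout/stderr lines to both the parent's streams and capture lists.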
async def lowercase__ ( __snake_case : Optional[int] , __snake_case : Dict=None , __snake_case : str=None , __snake_case : Dict=None , __snake_case : List[str]=False , __snake_case : Optional[int]=False ):
'''simple docstring'''
if echo:
print('\nRunning: ' , ' '.join(__snake_case ) )
UpperCAmelCase_ : Optional[Any] = await asyncio.create_subprocess_exec(
cmd[0] , *cmd[1:] , stdin=__snake_case , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=__snake_case , )
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, we will need to switch to the following code. The problem is that no data
# will be seen until it's done, and if it hangs, for example, there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
UpperCAmelCase_ : Any = []
UpperCAmelCase_ : str = []
def tee(__snake_case : Dict , __snake_case : Union[str, Any] , __snake_case : Tuple , __snake_case : Optional[int]="" ):
UpperCAmelCase_ : List[str] = line.decode('utf-8' ).rstrip()
sink.append(__snake_case )
if not quiet:
print(__snake_case , __snake_case , file=__snake_case )
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
asyncio.create_task(_read_stream(p.stdout , lambda __snake_case : tee(__snake_case , __snake_case , sys.stdout , label='stdout:' ) ) ),
asyncio.create_task(_read_stream(p.stderr , lambda __snake_case : tee(__snake_case , __snake_case , sys.stderr , label='stderr:' ) ) ),
] , timeout=__snake_case , )
return _RunOutput(await p.wait() , __snake_case , __snake_case )
def lowercase__ ( __snake_case : Optional[Any] , __snake_case : List[Any]=None , __snake_case : str=None , __snake_case : Tuple=180 , __snake_case : Dict=False , __snake_case : Optional[Any]=True ):
'''simple docstring'''
UpperCAmelCase_ : str = asyncio.get_event_loop()
UpperCAmelCase_ : int = loop.run_until_complete(
_stream_subprocess(__snake_case , env=__snake_case , stdin=__snake_case , timeout=__snake_case , quiet=__snake_case , echo=__snake_case ) )
UpperCAmelCase_ : int = ' '.join(__snake_case )
if result.returncode > 0:
UpperCAmelCase_ : int = '\n'.join(result.stderr )
raise RuntimeError(
F"'{cmd_str}' failed with returncode {result.returncode}\n\n"
F"The combined stderr from workers follows:\n{stderr}" )
return result
class lowerCamelCase (_snake_case ):
'''simple docstring'''
pass
def lowercase__ ( __snake_case : List[str] , __snake_case : List[Any]=False ):
'''simple docstring'''
try:
UpperCAmelCase_ : List[Any] = subprocess.check_output(__snake_case , stderr=subprocess.STDOUT )
if return_stdout:
if hasattr(__snake_case , 'decode' ):
UpperCAmelCase_ : str = output.decode('utf-8' )
return output
except subprocess.CalledProcessError as e:
raise SubprocessCallException(
F"Command `{' '.join(__snake_case )}` failed with the following error:\n\n{e.output.decode()}" ) from e
| 29 | 1 |
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
import numpy as np
# Parameters
__UpperCAmelCase = (720, 1280) # Height, Width
__UpperCAmelCase = (0.4, 0.6) # if height or width is lower than this scale, drop it.
__UpperCAmelCase = 1 / 100
__UpperCAmelCase = ''
__UpperCAmelCase = ''
__UpperCAmelCase = ''
__UpperCAmelCase = 250
def lowercase__ ( ):
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = get_dataset(__snake_case , __snake_case )
for index in range(__snake_case ):
UpperCAmelCase_ : Optional[int] = random.sample(range(len(__snake_case ) ) , 4 )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[str] = update_image_and_anno(
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case , filter_scale=__snake_case , )
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
UpperCAmelCase_ : Optional[Any] = random_chars(32 )
UpperCAmelCase_ : Optional[int] = path.split(os.sep )[-1].rsplit('.' , 1 )[0]
UpperCAmelCase_ : Optional[int] = F"{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}"
cva.imwrite(F"{file_root}.jpg" , __snake_case , [cva.IMWRITE_JPEG_QUALITY, 85] )
print(F"Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}" )
UpperCAmelCase_ : Optional[Any] = []
for anno in new_annos:
UpperCAmelCase_ : str = anno[3] - anno[1]
UpperCAmelCase_ : Any = anno[4] - anno[2]
UpperCAmelCase_ : Any = anno[1] + width / 2
UpperCAmelCase_ : Dict = anno[2] + height / 2
UpperCAmelCase_ : Union[str, Any] = F"{anno[0]} {x_center} {y_center} {width} {height}"
annos_list.append(__snake_case )
with open(F"{file_root}.txt" , 'w' ) as outfile:
outfile.write('\n'.join(line for line in annos_list ) )
def lowercase__ ( __snake_case : str , __snake_case : str ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = []
UpperCAmelCase_ : Dict = []
for label_file in glob.glob(os.path.join(__snake_case , '*.txt' ) ):
UpperCAmelCase_ : Union[str, Any] = label_file.split(os.sep )[-1].rsplit('.' , 1 )[0]
with open(__snake_case ) as in_file:
UpperCAmelCase_ : str = in_file.readlines()
UpperCAmelCase_ : Optional[int] = os.path.join(__snake_case , F"{label_name}.jpg" )
UpperCAmelCase_ : List[Any] = []
for obj_list in obj_lists:
UpperCAmelCase_ : str = obj_list.rstrip('\n' ).split(' ' )
UpperCAmelCase_ : str = float(obj[1] ) - float(obj[3] ) / 2
UpperCAmelCase_ : Optional[Any] = float(obj[2] ) - float(obj[4] ) / 2
UpperCAmelCase_ : Any = float(obj[1] ) + float(obj[3] ) / 2
UpperCAmelCase_ : Dict = float(obj[2] ) + float(obj[4] ) / 2
boxes.append([int(obj[0] ), xmin, ymin, xmax, ymax] )
if not boxes:
continue
img_paths.append(__snake_case )
labels.append(__snake_case )
return img_paths, labels
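# Compose four source images into a 2x2 mosaic split at a random point and remap every bounding box into mosaic coordinates.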
def lowercase__ ( __snake_case : list , __snake_case : list , __snake_case : list[int] , __snake_case : tuple[int, int] , __snake_case : tuple[float, float] , __snake_case : float = 0.0 , ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = np.zeros([output_size[0], output_size[1], 3] , dtype=np.uinta )
UpperCAmelCase_ : List[Any] = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
UpperCAmelCase_ : List[str] = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
UpperCAmelCase_ : Any = int(scale_x * output_size[1] )
UpperCAmelCase_ : List[str] = int(scale_y * output_size[0] )
UpperCAmelCase_ : Any = []
UpperCAmelCase_ : Tuple = []
for i, index in enumerate(__snake_case ):
UpperCAmelCase_ : Any = all_img_list[index]
path_list.append(__snake_case )
UpperCAmelCase_ : Optional[Any] = all_annos[index]
UpperCAmelCase_ : Tuple = cva.imread(__snake_case )
if i == 0: # top-left
UpperCAmelCase_ : Any = cva.resize(__snake_case , (divid_point_x, divid_point_y) )
UpperCAmelCase_ : str = img
for bbox in img_annos:
UpperCAmelCase_ : Optional[int] = bbox[1] * scale_x
UpperCAmelCase_ : str = bbox[2] * scale_y
UpperCAmelCase_ : Dict = bbox[3] * scale_x
UpperCAmelCase_ : Optional[int] = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 1: # top-right
UpperCAmelCase_ : Optional[int] = cva.resize(__snake_case , (output_size[1] - divid_point_x, divid_point_y) )
UpperCAmelCase_ : Dict = img
for bbox in img_annos:
UpperCAmelCase_ : Any = scale_x + bbox[1] * (1 - scale_x)
UpperCAmelCase_ : Dict = bbox[2] * scale_y
UpperCAmelCase_ : Dict = scale_x + bbox[3] * (1 - scale_x)
UpperCAmelCase_ : Any = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 2: # bottom-left
UpperCAmelCase_ : Dict = cva.resize(__snake_case , (divid_point_x, output_size[0] - divid_point_y) )
UpperCAmelCase_ : Union[str, Any] = img
for bbox in img_annos:
UpperCAmelCase_ : str = bbox[1] * scale_x
UpperCAmelCase_ : Union[str, Any] = scale_y + bbox[2] * (1 - scale_y)
UpperCAmelCase_ : str = bbox[3] * scale_x
UpperCAmelCase_ : Optional[Any] = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
else: # bottom-right
UpperCAmelCase_ : Optional[Any] = cva.resize(
__snake_case , (output_size[1] - divid_point_x, output_size[0] - divid_point_y) )
UpperCAmelCase_ : List[Any] = img
for bbox in img_annos:
UpperCAmelCase_ : List[str] = scale_x + bbox[1] * (1 - scale_x)
UpperCAmelCase_ : int = scale_y + bbox[2] * (1 - scale_y)
UpperCAmelCase_ : str = scale_x + bbox[3] * (1 - scale_x)
UpperCAmelCase_ : Tuple = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
# Remove bounding boxes smaller than the filter scale
if filter_scale > 0:
UpperCAmelCase_ : int = [
anno
for anno in new_anno
if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
]
return output_img, new_anno, path_list[0]
def lowercase__ ( __snake_case : int ):
'''simple docstring'''
assert number_char > 1, "The number of characters should be greater than 1"
UpperCAmelCase_ : int = ascii_lowercase + digits
return "".join(random.choice(__snake_case ) for _ in range(__snake_case ) )
if __name__ == "__main__":
main()
print('DONE ✅')
| 29 |
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
__UpperCAmelCase = logging.getLogger(__name__)
def lowercase__ ( __snake_case : List[Any]=2 , __snake_case : Union[str, Any]=3 , __snake_case : Any=16 , __snake_case : int = 10 , __snake_case : int = 2 ):
'''simple docstring'''
def get_dataset(__snake_case : Optional[Any] ):
UpperCAmelCase_ : Optional[Any] = torch.randn(batch_size * n_batches , 1 )
return TensorDataset(__snake_case , a * x + b + 0.1 * torch.randn(batch_size * n_batches , 1 ) )
UpperCAmelCase_ : Any = get_dataset(__snake_case )
UpperCAmelCase_ : str = get_dataset(__snake_case )
UpperCAmelCase_ : int = DataLoader(__snake_case , shuffle=__snake_case , batch_size=__snake_case , num_workers=4 )
UpperCAmelCase_ : int = DataLoader(__snake_case , shuffle=__snake_case , batch_size=__snake_case , num_workers=4 )
return (train_dataloader, valid_dataloader)
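# Minimal training loop for the checkpointing tests; the recorded random draws let resumed runs be compared with uninterrupted ones.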
def lowercase__ ( __snake_case : Optional[int] , __snake_case : str , __snake_case : Optional[int] , __snake_case : List[str] , __snake_case : Any , __snake_case : Tuple=None ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = []
for epoch in range(__snake_case ):
# Train quickly
model.train()
for batch in dataloader:
UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = batch
UpperCAmelCase_ : List[Any] = model(__snake_case )
UpperCAmelCase_ : int = torch.nn.functional.mse_loss(__snake_case , __snake_case )
accelerator.backward(__snake_case )
optimizer.step()
optimizer.zero_grad()
rands.append(random.random() ) # Introduce some randomness
if scheduler is not None:
scheduler.step()
return rands
class lowerCamelCase (nn.Module ):
'''simple docstring'''
def __init__( self ) -> Optional[Any]:
super().__init__()
UpperCAmelCase_ : List[Any] = nn.Parameter(torch.randn(1 ) )
UpperCAmelCase_ : Optional[int] = nn.Parameter(torch.randn(1 ) )
def __UpperCAmelCase ( self , _UpperCamelCase ) -> Optional[Any]:
return x * self.a + self.b
class lowerCamelCase (unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self ) -> Dict:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
UpperCAmelCase_ : Tuple = DummyModel()
UpperCAmelCase_ : List[str] = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = dummy_dataloaders()
UpperCAmelCase_ : Optional[int] = ProjectConfiguration(total_limit=1 , project_dir=_UpperCamelCase , automatic_checkpoint_naming=_UpperCamelCase )
# Train baseline
UpperCAmelCase_ : Dict = Accelerator(project_config=_UpperCamelCase )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : str = accelerator.prepare(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# Save initial
accelerator.save_state()
# Save second state
accelerator.save_state()
self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 )
def __UpperCAmelCase ( self ) -> int:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
UpperCAmelCase_ : Optional[Any] = DummyModel()
UpperCAmelCase_ : str = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = dummy_dataloaders()
# Train baseline
UpperCAmelCase_ : Tuple = Accelerator()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = accelerator.prepare(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# Save initial
UpperCAmelCase_ : Any = os.path.join(_UpperCamelCase , 'initial' )
accelerator.save_state(_UpperCamelCase )
((UpperCAmelCase_) , (UpperCAmelCase_)) : Optional[int] = model.a.item(), model.b.item()
UpperCAmelCase_ : Dict = optimizer.state_dict()
UpperCAmelCase_ : Union[str, Any] = train(3 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
((UpperCAmelCase_) , (UpperCAmelCase_)) : Union[str, Any] = model.a.item(), model.b.item()
UpperCAmelCase_ : Any = optimizer.state_dict()
# Train partially
set_seed(4_2 )
UpperCAmelCase_ : int = DummyModel()
UpperCAmelCase_ : int = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
UpperCAmelCase_ , UpperCAmelCase_ : str = dummy_dataloaders()
UpperCAmelCase_ : Optional[Any] = Accelerator()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Tuple = accelerator.prepare(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
accelerator.load_state(_UpperCamelCase )
((UpperCAmelCase_) , (UpperCAmelCase_)) : List[str] = model.a.item(), model.b.item()
UpperCAmelCase_ : Optional[Any] = optimizer.state_dict()
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
UpperCAmelCase_ : Dict = train(2 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# Save everything
UpperCAmelCase_ : Union[str, Any] = os.path.join(_UpperCamelCase , 'checkpoint' )
accelerator.save_state(_UpperCamelCase )
# Load everything back in and make sure all states work
accelerator.load_state(_UpperCamelCase )
test_rands += train(1 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
((UpperCAmelCase_) , (UpperCAmelCase_)) : Optional[Any] = model.a.item(), model.b.item()
UpperCAmelCase_ : Union[str, Any] = optimizer.state_dict()
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
def __UpperCAmelCase ( self ) -> int:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
UpperCAmelCase_ : Tuple = DummyModel()
UpperCAmelCase_ : Optional[int] = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = dummy_dataloaders()
UpperCAmelCase_ : Any = ProjectConfiguration(automatic_checkpoint_naming=_UpperCamelCase )
# Train baseline
UpperCAmelCase_ : str = Accelerator(project_dir=_UpperCamelCase , project_config=_UpperCamelCase )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Any = accelerator.prepare(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# Save initial
accelerator.save_state()
((UpperCAmelCase_) , (UpperCAmelCase_)) : Optional[int] = model.a.item(), model.b.item()
UpperCAmelCase_ : Optional[int] = optimizer.state_dict()
UpperCAmelCase_ : Optional[Any] = train(3 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
((UpperCAmelCase_) , (UpperCAmelCase_)) : Tuple = model.a.item(), model.b.item()
UpperCAmelCase_ : Optional[int] = optimizer.state_dict()
# Train partially
set_seed(4_2 )
UpperCAmelCase_ : Any = DummyModel()
UpperCAmelCase_ : Any = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = dummy_dataloaders()
UpperCAmelCase_ : Tuple = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=_UpperCamelCase )
UpperCAmelCase_ : List[Any] = Accelerator(project_dir=_UpperCamelCase , project_config=_UpperCamelCase )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Any = accelerator.prepare(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
accelerator.load_state(os.path.join(_UpperCamelCase , 'checkpoints' , 'checkpoint_0' ) )
((UpperCAmelCase_) , (UpperCAmelCase_)) : str = model.a.item(), model.b.item()
UpperCAmelCase_ : List[Any] = optimizer.state_dict()
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
UpperCAmelCase_ : Union[str, Any] = train(2 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# Save everything
accelerator.save_state()
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(_UpperCamelCase , 'checkpoints' , 'checkpoint_1' ) )
test_rands += train(1 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
((UpperCAmelCase_) , (UpperCAmelCase_)) : List[Any] = model.a.item(), model.b.item()
UpperCAmelCase_ : Dict = optimizer.state_dict()
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
def __UpperCAmelCase ( self ) -> Dict:
UpperCAmelCase_ : Optional[Any] = torch.tensor([1, 2, 3] )
UpperCAmelCase_ : Any = torch.tensor([2, 3, 4] )
UpperCAmelCase_ : Union[str, Any] = DummyModel()
UpperCAmelCase_ : List[str] = torch.optim.Adam(net.parameters() )
UpperCAmelCase_ : Any = Accelerator()
with self.assertRaises(_UpperCamelCase ) as ve:
accelerator.register_for_checkpointing(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
UpperCAmelCase_ : Optional[int] = str(ve.exception )
self.assertTrue('Item at index 0' in message )
self.assertTrue('Item at index 1' in message )
self.assertFalse('Item at index 2' in message )
self.assertFalse('Item at index 3' in message )
def __UpperCAmelCase ( self ) -> int:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
UpperCAmelCase_ : int = DummyModel()
UpperCAmelCase_ : Any = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
UpperCAmelCase_ : Dict = torch.optim.lr_scheduler.StepLR(_UpperCamelCase , step_size=1 , gamma=0.99 )
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = dummy_dataloaders()
UpperCAmelCase_ : Tuple = ProjectConfiguration(automatic_checkpoint_naming=_UpperCamelCase )
# Train baseline
UpperCAmelCase_ : Tuple = Accelerator(project_dir=_UpperCamelCase , project_config=_UpperCamelCase )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Any = accelerator.prepare(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# Save initial
accelerator.save_state()
UpperCAmelCase_ : Dict = scheduler.state_dict()
train(3 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
self.assertNotEqual(_UpperCamelCase , scheduler.state_dict() )
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(_UpperCamelCase , 'checkpoints' , 'checkpoint_0' ) )
self.assertEqual(_UpperCamelCase , scheduler.state_dict() )
def __UpperCAmelCase ( self ) -> Dict:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
UpperCAmelCase_ : Optional[int] = DummyModel()
UpperCAmelCase_ : Dict = ProjectConfiguration(automatic_checkpoint_naming=_UpperCamelCase , total_limit=2 )
# Train baseline
UpperCAmelCase_ : Optional[int] = Accelerator(project_dir=_UpperCamelCase , project_config=_UpperCamelCase )
UpperCAmelCase_ : str = accelerator.prepare(_UpperCamelCase )
# Save 11 states:
for _ in range(1_1 ):
accelerator.save_state()
self.assertTrue(not os.path.exists(os.path.join(_UpperCamelCase , 'checkpoints' , 'checkpoint_0' ) ) )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase , 'checkpoints' , 'checkpoint_9' ) ) )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase , 'checkpoints' , 'checkpoint_10' ) ) )
@require_cuda
def __UpperCAmelCase ( self ) -> str:
UpperCAmelCase_ : List[str] = ['torchrun', f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__ )]
execute_subprocess_async(_UpperCamelCase , env=os.environ.copy() )
if __name__ == "__main__":
__UpperCAmelCase = '/tmp/accelerate/state_checkpointing'
__UpperCAmelCase = DummyModel()
__UpperCAmelCase = torch.optim.Adam(params=model.parameters(), lr=1E-3)
__UpperCAmelCase = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.9_9)
__UpperCAmelCase , __UpperCAmelCase = dummy_dataloaders()
__UpperCAmelCase = ProjectConfiguration(automatic_checkpoint_naming=True)
# Train baseline
__UpperCAmelCase = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision='no')
if accelerator.process_index == 0:
if os.path.exists(savedir):
shutil.rmtree(savedir)
os.makedirs(savedir)
    model, optimizer, train_dataloader, valid_dataloader, scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, valid_dataloader, scheduler
    )
    model, optimizer = accelerator.prepare(model, optimizer)
train(3, model, train_dataloader, optimizer, accelerator, scheduler)
    # Check that the initial optimizer is loaded on the GPU
    for group in optimizer.param_groups:
        param_device = group['params'][0].device
break
assert param_device.type == accelerator.device.type
    model = model.cpu()
accelerator.wait_for_everyone()
accelerator.save_state()
accelerator.wait_for_everyone()
# Check CPU state
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='cpu')
for group in optimizer.param_groups:
        param_device = group['params'][0].device
break
assert (
param_device.type == torch.device('cpu').type
), F"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
# Check device state
model.to(accelerator.device)
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='on_device')
for group in optimizer.param_groups:
        param_device = group['params'][0].device
break
assert (
param_device.type == accelerator.device.type
), F"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
# Check error
with pytest.raises(TypeError, match='Unsupported optimizer map location passed'):
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='invalid')
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
shutil.rmtree(savedir)
accelerator.wait_for_everyone()
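# --- Illustrative sketch (added; not part of the original test suite) ---
# The smallest save/load cycle exercised by the tests above, assuming the same
# `accelerate` API (save_state/load_state with automatic checkpoint naming).
def _example_minimal_checkpoint_cycle(savedir='/tmp/accelerate/minimal_demo'):
    project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
    accelerator = Accelerator(project_dir=savedir, project_config=project_config)
    model = accelerator.prepare(DummyModel())
    accelerator.save_state()  # writes <savedir>/checkpoints/checkpoint_0
    accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'))
    return model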
| 29 | 1 |
import heapq
def greedy_min_vertex_cover(graph: dict) -> set:
    """Greedy approximation for the minimum vertex cover problem.

    >>> graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    >>> greedy_min_vertex_cover(graph)
    {0, 1, 2, 4}
    """
    queue: list[list] = []

    # for each node and its adjacency list, add them and the rank of the node to queue
    # using the heapq module the queue will be filled like a priority queue
    # heapq implements a min-heap, so -1 * len(v) is pushed to simulate a max-heap
    for key, value in graph.items():
        # O(log(n))
        heapq.heappush(queue, [-1 * len(value), (key, value)])

    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()

    # while the queue isn't empty and there are still edges
    # (queue[0][0] is the rank of the node with max rank)
    while queue and queue[0][0] != 0:
        # extract the vertex with max rank from the queue and add it to chosen_vertices
        argmax = heapq.heappop(queue)[1][0]
        chosen_vertices.add(argmax)

        # remove all arcs adjacent to argmax
        for elem in queue:
            # if the vertex has no adjacent nodes left, skip it
            if elem[0] == 0:
                continue
            # if argmax is reachable from elem,
            # remove argmax from elem's adjacency list and update its rank
            if argmax in elem[1][1]:
                index = elem[1][1].index(argmax)
                del elem[1][1][index]
                elem[0] += 1
        # re-order the queue
        heapq.heapify(queue)
    return chosen_vertices
if __name__ == "__main__":
import doctest
doctest.testmod()
    graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(F'Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}')
| 29 |
import warnings
from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor
__UpperCAmelCase = logging.get_logger(__name__)
class lowerCamelCase (_snake_case ):
'''simple docstring'''
def __init__( self , *_UpperCamelCase , **_UpperCamelCase ) -> None:
warnings.warn(
'The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use ImageGPTImageProcessor instead.' , _UpperCamelCase , )
super().__init__(*_UpperCamelCase , **_UpperCamelCase )
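# --- Illustrative migration sketch (added) ---
# The deprecated class above simply forwards to ImageGPTImageProcessor, so the
# replacement is a drop-in rename; the checkpoint id below is an assumption.
#
# from transformers import ImageGPTImageProcessor
# image_processor = ImageGPTImageProcessor.from_pretrained('openai/imagegpt-small')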
| 29 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
'distilbert-base-uncased': 'https://huggingface.co/distilbert-base-uncased/resolve/main/config.json',
'distilbert-base-uncased-distilled-squad': (
'https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json'
),
'distilbert-base-cased': 'https://huggingface.co/distilbert-base-cased/resolve/main/config.json',
'distilbert-base-cased-distilled-squad': (
'https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json'
),
'distilbert-base-german-cased': 'https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json',
'distilbert-base-multilingual-cased': (
'https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json'
),
'distilbert-base-uncased-finetuned-sst-2-english': (
'https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json'
),
}
class lowerCamelCase (_snake_case ):
'''simple docstring'''
_snake_case : Optional[Any] = '''distilbert'''
_snake_case : Dict = {
'''hidden_size''': '''dim''',
'''num_attention_heads''': '''n_heads''',
'''num_hidden_layers''': '''n_layers''',
}
def __init__( self , _UpperCamelCase=3_0_5_2_2 , _UpperCamelCase=5_1_2 , _UpperCamelCase=False , _UpperCamelCase=6 , _UpperCamelCase=1_2 , _UpperCamelCase=7_6_8 , _UpperCamelCase=4 * 7_6_8 , _UpperCamelCase=0.1 , _UpperCamelCase=0.1 , _UpperCamelCase="gelu" , _UpperCamelCase=0.02 , _UpperCamelCase=0.1 , _UpperCamelCase=0.2 , _UpperCamelCase=0 , **_UpperCamelCase , ) -> Any:
UpperCAmelCase_ : Union[str, Any] = vocab_size
UpperCAmelCase_ : Optional[int] = max_position_embeddings
UpperCAmelCase_ : Tuple = sinusoidal_pos_embds
UpperCAmelCase_ : Tuple = n_layers
UpperCAmelCase_ : Optional[int] = n_heads
UpperCAmelCase_ : Optional[int] = dim
UpperCAmelCase_ : str = hidden_dim
UpperCAmelCase_ : Tuple = dropout
UpperCAmelCase_ : Optional[int] = attention_dropout
UpperCAmelCase_ : Optional[Any] = activation
UpperCAmelCase_ : int = initializer_range
UpperCAmelCase_ : Tuple = qa_dropout
UpperCAmelCase_ : List[str] = seq_classif_dropout
super().__init__(**_UpperCamelCase , pad_token_id=_UpperCamelCase )
class lowerCamelCase (_snake_case ):
'''simple docstring'''
@property
def __UpperCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
UpperCAmelCase_ : List[str] = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
UpperCAmelCase_ : Optional[int] = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
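# --- Illustrative usage sketch (added; not part of the original module) ---
# The attribute map above lets generic config names resolve to DistilBERT-specific
# ones, e.g. `num_hidden_layers` reads `n_layers`. The values below are arbitrary.
#
# from transformers import DistilBertConfig
# config = DistilBertConfig(n_layers=3, n_heads=4, dim=128, hidden_dim=512)
# assert config.num_hidden_layers == 3  # resolved through the attribute map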
| 29 |
def is_palindrome(head):
    if not head:
        return True
    # split the list into two parts
    fast, slow = head.next, head
    while fast and fast.next:
        fast = fast.next.next
        slow = slow.next
    second = slow.next
    slow.next = None  # cut the first half off from the second (works without this, but is cleaner)
    # reverse the second part
    node = None
    while second:
        nxt = second.next
        second.next = node
        node = second
        second = nxt
    # compare the two parts;
    # the second part has the same number of nodes or one fewer
    while node:
        if node.val != head.val:
            return False
        node = node.next
        head = head.next
    return True


def is_palindrome_stack(head):
    if not head or not head.next:
        return True

    # 1. Get the midpoint (slow)
    slow = fast = cur = head
    while fast and fast.next:
        fast, slow = fast.next.next, slow.next

    # 2. Push the second half into the stack
    stack = [slow.val]
    while slow.next:
        slow = slow.next
        stack.append(slow.val)

    # 3. Comparison
    while stack:
        if stack.pop() != cur.val:
            return False
        cur = cur.next

    return True


def is_palindrome_dict(head):
    if not head or not head.next:
        return True
    d = {}
    pos = 0
    # record every position at which each value occurs
    while head:
        if head.val in d:
            d[head.val].append(pos)
        else:
            d[head.val] = [pos]
        head = head.next
        pos += 1
    checksum = pos - 1
    middle = 0
    for v in d.values():
        # at most one value may occur an odd number of times (the middle element)
        if len(v) % 2 != 0:
            middle += 1
        else:
            # positions of a value must mirror each other around the center
            step = 0
            for i in range(0, len(v)):
                if v[i] + v[len(v) - 1 - step] != checksum:
                    return False
                step += 1
        if middle > 1:
            return False
    return True
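# --- Illustrative harness (added; not part of the original file) ---
# The checks above expect singly linked nodes exposing `.val` and `.next`, but no
# node class is defined in this file; the minimal one below is an assumption.
class ListNode:
    def __init__(self, val=0, next_node=None):
        self.val = val
        self.next = next_node


def build_linked_list(values):
    """Build a linked list from an iterable and return its head (or None)."""
    head = None
    for value in reversed(list(values)):
        head = ListNode(value, head)
    return head


if __name__ == "__main__":
    print(is_palindrome_stack(build_linked_list([1, 2, 3, 2, 1])))  # True
    print(is_palindrome_dict(build_linked_list([1, 2, 3])))  # False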
| 29 | 1 |
from PIL import Image
def change_contrast(img: Image, level: int) -> Image:
    """Change the contrast of a PIL image by `level` (roughly -255 to 255)."""
    factor = (259 * (level + 255)) / (255 * (259 - level))

    def contrast(c: int) -> int:
        # stretch (or compress) each channel value around the midpoint 128
        return int(128 + factor * (c - 128))

    return img.point(contrast)
if __name__ == "__main__":
# Load image
with Image.open('image_data/lena.jpg') as img:
# Change contrast to 170
        cont_img = change_contrast(img, 170)
cont_img.save('image_data/lena_high_contrast.png', format='png')
| 29 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__UpperCAmelCase = {'configuration_vit_msn': ['VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTMSNConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST',
'ViTMSNModel',
'ViTMSNForImageClassification',
'ViTMSNPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
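# --- Illustrative usage sketch (added) ---
# The lazy module above resolves names on first attribute access, so eager-looking
# imports still work; the checkpoint id below is an assumption for illustration.
#
# from transformers import ViTMSNForImageClassification
# model = ViTMSNForImageClassification.from_pretrained('facebook/vit-msn-small')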
| 29 | 1 |
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class lowerCamelCase (unittest.TestCase ):
'''simple docstring'''
@parameterized.expand([(None,), ('foo.json',)] )
def __UpperCAmelCase ( self , _UpperCamelCase ) -> List[Any]:
UpperCAmelCase_ : Union[str, Any] = GenerationConfig(
do_sample=_UpperCamelCase , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(_UpperCamelCase , config_name=_UpperCamelCase )
UpperCAmelCase_ : str = GenerationConfig.from_pretrained(_UpperCamelCase , config_name=_UpperCamelCase )
# Checks parameters that were specified
self.assertEqual(loaded_config.do_sample , _UpperCamelCase )
self.assertEqual(loaded_config.temperature , 0.7 )
self.assertEqual(loaded_config.length_penalty , 1.0 )
self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]] )
# Checks parameters that were not specified (defaults)
self.assertEqual(loaded_config.top_k , 5_0 )
self.assertEqual(loaded_config.max_length , 2_0 )
self.assertEqual(loaded_config.max_time , _UpperCamelCase )
def __UpperCAmelCase ( self ) -> List[str]:
UpperCAmelCase_ : Optional[int] = AutoConfig.from_pretrained('gpt2' )
UpperCAmelCase_ : Tuple = GenerationConfig.from_model_config(_UpperCamelCase )
UpperCAmelCase_ : int = GenerationConfig()
# The generation config has loaded a few non-default parameters from the model config
self.assertNotEqual(_UpperCamelCase , _UpperCamelCase )
# One of those parameters is eos_token_id -- check if it matches
self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id )
self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id )
def __UpperCAmelCase ( self ) -> str:
UpperCAmelCase_ : List[str] = GenerationConfig()
UpperCAmelCase_ : int = {
'max_new_tokens': 1_0_2_4,
'foo': 'bar',
}
UpperCAmelCase_ : List[Any] = copy.deepcopy(_UpperCamelCase )
UpperCAmelCase_ : Tuple = generation_config.update(**_UpperCamelCase )
# update_kwargs was not modified (no side effects)
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(generation_config.max_new_tokens , 1_0_2_4 )
# `.update()` returns a dictionary of unused kwargs
self.assertEqual(_UpperCamelCase , {'foo': 'bar'} )
def __UpperCAmelCase ( self ) -> int:
UpperCAmelCase_ : int = GenerationConfig()
UpperCAmelCase_ : Union[str, Any] = 'bar'
with tempfile.TemporaryDirectory('test-generation-config' ) as tmp_dir:
generation_config.save_pretrained(_UpperCamelCase )
UpperCAmelCase_ : List[str] = GenerationConfig.from_pretrained(_UpperCamelCase )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(new_config.foo , 'bar' )
UpperCAmelCase_ : Tuple = GenerationConfig.from_model_config(_UpperCamelCase )
assert not hasattr(_UpperCamelCase , 'foo' ) # no new kwargs should be initialized if from config
def __UpperCAmelCase ( self ) -> Dict:
UpperCAmelCase_ : List[Any] = GenerationConfig()
self.assertEqual(default_config.temperature , 1.0 )
self.assertEqual(default_config.do_sample , _UpperCamelCase )
self.assertEqual(default_config.num_beams , 1 )
UpperCAmelCase_ : List[Any] = GenerationConfig(
do_sample=_UpperCamelCase , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
self.assertEqual(config.temperature , 0.7 )
self.assertEqual(config.do_sample , _UpperCamelCase )
self.assertEqual(config.num_beams , 1 )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(_UpperCamelCase )
UpperCAmelCase_ : str = GenerationConfig.from_pretrained(_UpperCamelCase , temperature=1.0 )
self.assertEqual(loaded_config.temperature , 1.0 )
self.assertEqual(loaded_config.do_sample , _UpperCamelCase )
self.assertEqual(loaded_config.num_beams , 1 ) # default value
@is_staging_test
class lowerCamelCase (unittest.TestCase ):
'''simple docstring'''
@classmethod
def __UpperCAmelCase ( cls ) -> Optional[int]:
UpperCAmelCase_ : Dict = TOKEN
HfFolder.save_token(_UpperCamelCase )
@classmethod
def __UpperCAmelCase ( cls ) -> List[Any]:
try:
delete_repo(token=cls._token , repo_id='test-generation-config' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='valid_org/test-generation-config-org' )
except HTTPError:
pass
def __UpperCAmelCase ( self ) -> Union[str, Any]:
UpperCAmelCase_ : List[str] = GenerationConfig(
do_sample=_UpperCamelCase , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub('test-generation-config' , use_auth_token=self._token )
UpperCAmelCase_ : Optional[int] = GenerationConfig.from_pretrained(f"{USER}/test-generation-config" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_UpperCamelCase , getattr(_UpperCamelCase , _UpperCamelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id='test-generation-config' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
_UpperCamelCase , repo_id='test-generation-config' , push_to_hub=_UpperCamelCase , use_auth_token=self._token )
UpperCAmelCase_ : Optional[int] = GenerationConfig.from_pretrained(f"{USER}/test-generation-config" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_UpperCamelCase , getattr(_UpperCamelCase , _UpperCamelCase ) )
def __UpperCAmelCase ( self ) -> int:
UpperCAmelCase_ : List[str] = GenerationConfig(
do_sample=_UpperCamelCase , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub('valid_org/test-generation-config-org' , use_auth_token=self._token )
UpperCAmelCase_ : Union[str, Any] = GenerationConfig.from_pretrained('valid_org/test-generation-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_UpperCamelCase , getattr(_UpperCamelCase , _UpperCamelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id='valid_org/test-generation-config-org' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
_UpperCamelCase , repo_id='valid_org/test-generation-config-org' , push_to_hub=_UpperCamelCase , use_auth_token=self._token )
UpperCAmelCase_ : Union[str, Any] = GenerationConfig.from_pretrained('valid_org/test-generation-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_UpperCamelCase , getattr(_UpperCamelCase , _UpperCamelCase ) )
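# --- Illustrative sketch (added; not part of the original tests) ---
# The save/load round trip the tests above exercise, in its smallest form.
def _example_generation_config_roundtrip(tmp_dir):
    config = GenerationConfig(do_sample=True, temperature=0.7)
    config.save_pretrained(tmp_dir)
    return GenerationConfig.from_pretrained(tmp_dir)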
| 29 |
__UpperCAmelCase = {
'Pillow': 'Pillow<10.0.0',
'accelerate': 'accelerate>=0.20.3',
'av': 'av==9.2.0',
'beautifulsoup4': 'beautifulsoup4',
'black': 'black~=23.1',
'codecarbon': 'codecarbon==1.2.0',
'cookiecutter': 'cookiecutter==1.7.3',
'dataclasses': 'dataclasses',
'datasets': 'datasets!=2.5.0',
'decord': 'decord==0.6.0',
'deepspeed': 'deepspeed>=0.9.3',
'diffusers': 'diffusers',
'dill': 'dill<0.3.5',
'evaluate': 'evaluate>=0.2.0',
'fairscale': 'fairscale>0.3',
'faiss-cpu': 'faiss-cpu',
'fastapi': 'fastapi',
'filelock': 'filelock',
'flax': 'flax>=0.4.1,<=0.7.0',
'ftfy': 'ftfy',
'fugashi': 'fugashi>=1.0',
'GitPython': 'GitPython<3.1.19',
'hf-doc-builder': 'hf-doc-builder>=0.3.0',
'huggingface-hub': 'huggingface-hub>=0.14.1,<1.0',
'importlib_metadata': 'importlib_metadata',
'ipadic': 'ipadic>=1.0.0,<2.0',
'isort': 'isort>=5.5.4',
'jax': 'jax>=0.2.8,!=0.3.2,<=0.4.13',
'jaxlib': 'jaxlib>=0.1.65,<=0.4.13',
'jieba': 'jieba',
'kenlm': 'kenlm',
'keras-nlp': 'keras-nlp>=0.3.1',
'librosa': 'librosa',
'nltk': 'nltk',
'natten': 'natten>=0.14.6',
'numpy': 'numpy>=1.17',
'onnxconverter-common': 'onnxconverter-common',
'onnxruntime-tools': 'onnxruntime-tools>=1.4.2',
'onnxruntime': 'onnxruntime>=1.4.0',
'opencv-python': 'opencv-python',
'optuna': 'optuna',
'optax': 'optax>=0.0.8,<=0.1.4',
'packaging': 'packaging>=20.0',
'parameterized': 'parameterized',
'phonemizer': 'phonemizer',
'protobuf': 'protobuf',
'psutil': 'psutil',
'pyyaml': 'pyyaml>=5.1',
'pydantic': 'pydantic<2',
'pytest': 'pytest>=7.2.0',
'pytest-timeout': 'pytest-timeout',
'pytest-xdist': 'pytest-xdist',
'python': 'python>=3.8.0',
'ray[tune]': 'ray[tune]',
'regex': 'regex!=2019.12.17',
'requests': 'requests',
'rhoknp': 'rhoknp>=1.1.0,<1.3.1',
'rjieba': 'rjieba',
'rouge-score': 'rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1',
'ruff': 'ruff>=0.0.241,<=0.0.259',
'sacrebleu': 'sacrebleu>=1.4.12,<2.0.0',
'sacremoses': 'sacremoses',
'safetensors': 'safetensors>=0.3.1',
'sagemaker': 'sagemaker>=2.31.0',
'scikit-learn': 'scikit-learn',
'sentencepiece': 'sentencepiece>=0.1.91,!=0.1.92',
'sigopt': 'sigopt',
'starlette': 'starlette',
'sudachipy': 'sudachipy>=0.6.6',
'sudachidict_core': 'sudachidict_core>=20220729',
'tensorflow-cpu': 'tensorflow-cpu>=2.6,<2.14',
'tensorflow': 'tensorflow>=2.6,<2.14',
'tensorflow-text': 'tensorflow-text<2.14',
'tf2onnx': 'tf2onnx',
'timeout-decorator': 'timeout-decorator',
'timm': 'timm',
'tokenizers': 'tokenizers>=0.11.1,!=0.11.3,<0.14',
'torch': 'torch>=1.9,!=1.12.0',
'torchaudio': 'torchaudio',
'torchvision': 'torchvision',
'pyctcdecode': 'pyctcdecode>=0.4.0',
'tqdm': 'tqdm>=4.27',
'unidic': 'unidic>=1.0.2',
'unidic_lite': 'unidic_lite>=1.0.7',
'urllib3': 'urllib3<2.0.0',
'uvicorn': 'uvicorn',
}
| 29 | 1 |
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
__UpperCAmelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
__UpperCAmelCase = '\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior")\n >>> pipe_prior.to("cuda")\n >>> prompt = "red cat, 4k photo"\n >>> out = pipe_prior(prompt)\n >>> image_emb = out.image_embeds\n >>> zero_image_emb = out.negative_image_embeds\n >>> pipe = KandinskyV22Pipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder")\n >>> pipe.to("cuda")\n >>> image = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=50,\n ... ).images\n >>> image[0].save("cat.png")\n ```\n'
def downscale_height_and_width(height, width, scale_factor=8):
    '''simple docstring'''
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
return new_height * scale_factor, new_width * scale_factor
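# Worked example (added): with the default scale_factor=8, one latent cell covers
# 8 * 8 = 64 pixels, so downscale_height_and_width(512, 512) returns
# (512 // 64 * 8, 512 // 64 * 8) = (64, 64), while a 300x300 request rounds up to
# ((300 // 64 + 1) * 8,) * 2 = (40, 40).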
class lowerCamelCase (_snake_case ):
'''simple docstring'''
def __init__( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , ) -> Any:
super().__init__()
self.register_modules(
unet=_UpperCamelCase , scheduler=_UpperCamelCase , movq=_UpperCamelCase , )
UpperCAmelCase_ : List[str] = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> int:
if latents is None:
UpperCAmelCase_ : List[Any] = randn_tensor(_UpperCamelCase , generator=_UpperCamelCase , device=_UpperCamelCase , dtype=_UpperCamelCase )
else:
if latents.shape != shape:
raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}" )
UpperCAmelCase_ : Optional[int] = latents.to(_UpperCamelCase )
UpperCAmelCase_ : str = latents * scheduler.init_noise_sigma
return latents
def __UpperCAmelCase ( self , _UpperCamelCase=0 ) -> int:
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`' )
UpperCAmelCase_ : Optional[int] = torch.device(f"cuda:{gpu_id}" )
UpperCAmelCase_ : List[str] = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(_UpperCamelCase , _UpperCamelCase )
def __UpperCAmelCase ( self , _UpperCamelCase=0 ) -> List[str]:
if is_accelerate_available() and is_accelerate_version('>=' , '0.17.0.dev0' ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.' )
UpperCAmelCase_ : Any = torch.device(f"cuda:{gpu_id}" )
if self.device.type != "cpu":
self.to('cpu' , silence_dtype_warnings=_UpperCamelCase )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
UpperCAmelCase_ : Dict = None
for cpu_offloaded_model in [self.unet, self.movq]:
UpperCAmelCase_ , UpperCAmelCase_ : List[str] = cpu_offload_with_hook(_UpperCamelCase , _UpperCamelCase , prev_module_hook=_UpperCamelCase )
# We'll offload the last model manually.
UpperCAmelCase_ : Dict = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def __UpperCAmelCase ( self ) -> int:
if not hasattr(self.unet , '_hf_hook' ):
return self.device
for module in self.unet.modules():
if (
hasattr(_UpperCamelCase , '_hf_hook' )
and hasattr(module._hf_hook , 'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(_UpperCamelCase )
def __call__( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = 5_1_2 , _UpperCamelCase = 5_1_2 , _UpperCamelCase = 1_0_0 , _UpperCamelCase = 4.0 , _UpperCamelCase = 1 , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = "pil" , _UpperCamelCase = True , ) -> Optional[int]:
UpperCAmelCase_ : str = self._execution_device
UpperCAmelCase_ : Union[str, Any] = guidance_scale > 1.0
if isinstance(_UpperCamelCase , _UpperCamelCase ):
UpperCAmelCase_ : Optional[Any] = torch.cat(_UpperCamelCase , dim=0 )
UpperCAmelCase_ : Optional[int] = image_embeds.shape[0] * num_images_per_prompt
if isinstance(_UpperCamelCase , _UpperCamelCase ):
UpperCAmelCase_ : Tuple = torch.cat(_UpperCamelCase , dim=0 )
if do_classifier_free_guidance:
UpperCAmelCase_ : Optional[Any] = image_embeds.repeat_interleave(_UpperCamelCase , dim=0 )
UpperCAmelCase_ : Dict = negative_image_embeds.repeat_interleave(_UpperCamelCase , dim=0 )
UpperCAmelCase_ : Union[str, Any] = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=_UpperCamelCase )
self.scheduler.set_timesteps(_UpperCamelCase , device=_UpperCamelCase )
UpperCAmelCase_ : List[Any] = self.scheduler.timesteps
UpperCAmelCase_ : Any = self.unet.config.in_channels
UpperCAmelCase_ , UpperCAmelCase_ : Dict = downscale_height_and_width(_UpperCamelCase , _UpperCamelCase , self.movq_scale_factor )
# create initial latent
UpperCAmelCase_ : List[str] = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , image_embeds.dtype , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , self.scheduler , )
for i, t in enumerate(self.progress_bar(_UpperCamelCase ) ):
# expand the latents if we are doing classifier free guidance
UpperCAmelCase_ : Optional[int] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
UpperCAmelCase_ : Optional[int] = {'image_embeds': image_embeds}
UpperCAmelCase_ : Union[str, Any] = self.unet(
sample=_UpperCamelCase , timestep=_UpperCamelCase , encoder_hidden_states=_UpperCamelCase , added_cond_kwargs=_UpperCamelCase , return_dict=_UpperCamelCase , )[0]
if do_classifier_free_guidance:
UpperCAmelCase_ , UpperCAmelCase_ : List[str] = noise_pred.split(latents.shape[1] , dim=1 )
UpperCAmelCase_ , UpperCAmelCase_ : List[str] = noise_pred.chunk(2 )
UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = variance_pred.chunk(2 )
UpperCAmelCase_ : Dict = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
UpperCAmelCase_ : Tuple = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , 'variance_type' )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
UpperCAmelCase_ , UpperCAmelCase_ : str = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
UpperCAmelCase_ : Dict = self.scheduler.step(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , generator=_UpperCamelCase , )[0]
# post-processing
UpperCAmelCase_ : Dict = self.movq.decode(_UpperCamelCase , force_not_quantize=_UpperCamelCase )['sample']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}" )
if output_type in ["np", "pil"]:
UpperCAmelCase_ : Any = image * 0.5 + 0.5
UpperCAmelCase_ : Optional[Any] = image.clamp(0 , 1 )
UpperCAmelCase_ : Tuple = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
UpperCAmelCase_ : Tuple = self.numpy_to_pil(_UpperCamelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_UpperCamelCase )
| 29 |
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .attention_processor import AttentionProcessor, AttnProcessor
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder
@dataclass
class lowerCamelCase (_snake_case ):
'''simple docstring'''
_snake_case : "DiagonalGaussianDistribution"
class lowerCamelCase (_snake_case , _snake_case ):
'''simple docstring'''
_snake_case : Optional[int] = True
@register_to_config
def __init__( self , _UpperCamelCase = 3 , _UpperCamelCase = 3 , _UpperCamelCase = ("DownEncoderBlock2D",) , _UpperCamelCase = ("UpDecoderBlock2D",) , _UpperCamelCase = (6_4,) , _UpperCamelCase = 1 , _UpperCamelCase = "silu" , _UpperCamelCase = 4 , _UpperCamelCase = 3_2 , _UpperCamelCase = 3_2 , _UpperCamelCase = 0.1_82_15 , ) -> List[Any]:
super().__init__()
# pass init params to Encoder
UpperCAmelCase_ : List[str] = Encoder(
in_channels=_UpperCamelCase , out_channels=_UpperCamelCase , down_block_types=_UpperCamelCase , block_out_channels=_UpperCamelCase , layers_per_block=_UpperCamelCase , act_fn=_UpperCamelCase , norm_num_groups=_UpperCamelCase , double_z=_UpperCamelCase , )
# pass init params to Decoder
UpperCAmelCase_ : Dict = Decoder(
in_channels=_UpperCamelCase , out_channels=_UpperCamelCase , up_block_types=_UpperCamelCase , block_out_channels=_UpperCamelCase , layers_per_block=_UpperCamelCase , norm_num_groups=_UpperCamelCase , act_fn=_UpperCamelCase , )
UpperCAmelCase_ : Any = nn.Convad(2 * latent_channels , 2 * latent_channels , 1 )
UpperCAmelCase_ : List[Any] = nn.Convad(_UpperCamelCase , _UpperCamelCase , 1 )
UpperCAmelCase_ : Any = False
UpperCAmelCase_ : int = False
# only relevant if vae tiling is enabled
UpperCAmelCase_ : Optional[int] = self.config.sample_size
UpperCAmelCase_ : int = (
self.config.sample_size[0]
if isinstance(self.config.sample_size , (list, tuple) )
else self.config.sample_size
)
UpperCAmelCase_ : Union[str, Any] = int(sample_size / (2 ** (len(self.config.block_out_channels ) - 1)) )
UpperCAmelCase_ : Optional[Any] = 0.25
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase=False ) -> List[str]:
if isinstance(_UpperCamelCase , (Encoder, Decoder) ):
UpperCAmelCase_ : Union[str, Any] = value
def __UpperCAmelCase ( self , _UpperCamelCase = True ) -> int:
UpperCAmelCase_ : Tuple = use_tiling
def __UpperCAmelCase ( self ) -> Dict:
self.enable_tiling(_UpperCamelCase )
def __UpperCAmelCase ( self ) -> Optional[Any]:
UpperCAmelCase_ : str = True
def __UpperCAmelCase ( self ) -> List[Any]:
UpperCAmelCase_ : Optional[int] = False
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def __UpperCAmelCase ( self ) -> Dict[str, AttentionProcessor]:
UpperCAmelCase_ : Optional[int] = {}
def fn_recursive_add_processors(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
if hasattr(_UpperCamelCase , 'set_processor' ):
UpperCAmelCase_ : Optional[int] = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(f"{name}.{sub_name}" , _UpperCamelCase , _UpperCamelCase )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
return processors
def __UpperCAmelCase ( self , _UpperCamelCase ) -> List[Any]:
UpperCAmelCase_ : Union[str, Any] = len(self.attn_processors.keys() )
if isinstance(_UpperCamelCase , _UpperCamelCase ) and len(_UpperCamelCase ) != count:
raise ValueError(
f"A dict of processors was passed, but the number of processors {len(_UpperCamelCase )} does not match the"
f" number of attention layers: {count}. Please make sure to pass {count} processor classes." )
def fn_recursive_attn_processor(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
if hasattr(_UpperCamelCase , 'set_processor' ):
if not isinstance(_UpperCamelCase , _UpperCamelCase ):
module.set_processor(_UpperCamelCase )
else:
module.set_processor(processor.pop(f"{name}.processor" ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(f"{name}.{sub_name}" , _UpperCamelCase , _UpperCamelCase )
for name, module in self.named_children():
fn_recursive_attn_processor(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
def __UpperCAmelCase ( self ) -> Union[str, Any]:
self.set_attn_processor(AttnProcessor() )
@apply_forward_hook
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = True ) -> AutoencoderKLOutput:
if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
return self.tiled_encode(_UpperCamelCase , return_dict=_UpperCamelCase )
if self.use_slicing and x.shape[0] > 1:
UpperCAmelCase_ : Union[str, Any] = [self.encoder(_UpperCamelCase ) for x_slice in x.split(1 )]
UpperCAmelCase_ : Tuple = torch.cat(_UpperCamelCase )
else:
UpperCAmelCase_ : List[Any] = self.encoder(_UpperCamelCase )
UpperCAmelCase_ : Optional[Any] = self.quant_conv(_UpperCamelCase )
UpperCAmelCase_ : Tuple = DiagonalGaussianDistribution(_UpperCamelCase )
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=_UpperCamelCase )
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = True ) -> Union[DecoderOutput, torch.FloatTensor]:
if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
return self.tiled_decode(_UpperCamelCase , return_dict=_UpperCamelCase )
UpperCAmelCase_ : str = self.post_quant_conv(_UpperCamelCase )
UpperCAmelCase_ : List[str] = self.decoder(_UpperCamelCase )
if not return_dict:
return (dec,)
return DecoderOutput(sample=_UpperCamelCase )
@apply_forward_hook
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = True ) -> Union[DecoderOutput, torch.FloatTensor]:
if self.use_slicing and z.shape[0] > 1:
UpperCAmelCase_ : List[str] = [self._decode(_UpperCamelCase ).sample for z_slice in z.split(1 )]
UpperCAmelCase_ : Dict = torch.cat(_UpperCamelCase )
else:
UpperCAmelCase_ : Any = self._decode(_UpperCamelCase ).sample
if not return_dict:
return (decoded,)
return DecoderOutput(sample=_UpperCamelCase )
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Any:
UpperCAmelCase_ : Tuple = min(a.shape[2] , b.shape[2] , _UpperCamelCase )
for y in range(_UpperCamelCase ):
UpperCAmelCase_ : str = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
return b
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Dict:
UpperCAmelCase_ : Tuple = min(a.shape[3] , b.shape[3] , _UpperCamelCase )
for x in range(_UpperCamelCase ):
UpperCAmelCase_ : int = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
return b
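    # Worked note (added): blend_v and blend_h above linearly cross-fade the last
    # `blend_extent` rows/columns of tile `a` into the first rows/columns of tile
    # `b` (weight y / blend_extent), hiding seams between the overlapping tiles
    # produced by tiled_encode / tiled_decode below.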
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = True ) -> AutoencoderKLOutput:
UpperCAmelCase_ : Any = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor) )
UpperCAmelCase_ : Tuple = int(self.tile_latent_min_size * self.tile_overlap_factor )
UpperCAmelCase_ : Optional[int] = self.tile_latent_min_size - blend_extent
# Split the image into 512x512 tiles and encode them separately.
UpperCAmelCase_ : List[str] = []
for i in range(0 , x.shape[2] , _UpperCamelCase ):
UpperCAmelCase_ : Any = []
for j in range(0 , x.shape[3] , _UpperCamelCase ):
UpperCAmelCase_ : Any = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
UpperCAmelCase_ : Dict = self.encoder(_UpperCamelCase )
UpperCAmelCase_ : List[str] = self.quant_conv(_UpperCamelCase )
row.append(_UpperCamelCase )
rows.append(_UpperCamelCase )
UpperCAmelCase_ : str = []
for i, row in enumerate(_UpperCamelCase ):
UpperCAmelCase_ : List[Any] = []
for j, tile in enumerate(_UpperCamelCase ):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
UpperCAmelCase_ : Dict = self.blend_v(rows[i - 1][j] , _UpperCamelCase , _UpperCamelCase )
if j > 0:
UpperCAmelCase_ : List[str] = self.blend_h(row[j - 1] , _UpperCamelCase , _UpperCamelCase )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(_UpperCamelCase , dim=3 ) )
UpperCAmelCase_ : Union[str, Any] = torch.cat(_UpperCamelCase , dim=2 )
UpperCAmelCase_ : List[Any] = DiagonalGaussianDistribution(_UpperCamelCase )
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=_UpperCamelCase )
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = True ) -> Union[DecoderOutput, torch.FloatTensor]:
UpperCAmelCase_ : str = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor) )
UpperCAmelCase_ : Dict = int(self.tile_sample_min_size * self.tile_overlap_factor )
UpperCAmelCase_ : Dict = self.tile_sample_min_size - blend_extent
# Split z into overlapping 64x64 tiles and decode them separately.
# The tiles have an overlap to avoid seams between tiles.
UpperCAmelCase_ : Union[str, Any] = []
for i in range(0 , z.shape[2] , _UpperCamelCase ):
UpperCAmelCase_ : List[str] = []
for j in range(0 , z.shape[3] , _UpperCamelCase ):
UpperCAmelCase_ : List[str] = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
UpperCAmelCase_ : Optional[Any] = self.post_quant_conv(_UpperCamelCase )
UpperCAmelCase_ : Tuple = self.decoder(_UpperCamelCase )
row.append(_UpperCamelCase )
rows.append(_UpperCamelCase )
UpperCAmelCase_ : Optional[Any] = []
for i, row in enumerate(_UpperCamelCase ):
UpperCAmelCase_ : List[Any] = []
for j, tile in enumerate(_UpperCamelCase ):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
UpperCAmelCase_ : Union[str, Any] = self.blend_v(rows[i - 1][j] , _UpperCamelCase , _UpperCamelCase )
if j > 0:
UpperCAmelCase_ : Optional[Any] = self.blend_h(row[j - 1] , _UpperCamelCase , _UpperCamelCase )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(_UpperCamelCase , dim=3 ) )
UpperCAmelCase_ : Dict = torch.cat(_UpperCamelCase , dim=2 )
if not return_dict:
return (dec,)
return DecoderOutput(sample=_UpperCamelCase )
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = False , _UpperCamelCase = True , _UpperCamelCase = None , ) -> Union[DecoderOutput, torch.FloatTensor]:
UpperCAmelCase_ : Optional[Any] = sample
UpperCAmelCase_ : Union[str, Any] = self.encode(_UpperCamelCase ).latent_dist
if sample_posterior:
UpperCAmelCase_ : str = posterior.sample(generator=_UpperCamelCase )
else:
UpperCAmelCase_ : int = posterior.mode()
UpperCAmelCase_ : Dict = self.decode(_UpperCamelCase ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=_UpperCamelCase )
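# --- Illustrative usage sketch (added; not part of the original module) ---
# Tiled VAE inference through the public diffusers API this module mirrors; the
# checkpoint id is an assumption for illustration.
#
# import torch
# from diffusers import AutoencoderKL
#
# vae = AutoencoderKL.from_pretrained('stabilityai/sd-vae-ft-mse')
# vae.enable_tiling()  # large inputs are encoded/decoded tile by tile
# with torch.no_grad():
#     latents = vae.encode(torch.randn(1, 3, 1024, 1024)).latent_dist.sample()
#     reconstruction = vae.decode(latents).sample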
| 29 | 1 |
import random
import unittest
import torch
from diffusers import IFImgaImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class lowerCamelCase (_snake_case , _snake_case , unittest.TestCase ):
'''simple docstring'''
_snake_case : Union[str, Any] = IFImgaImgSuperResolutionPipeline
_snake_case : Union[str, Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''width''', '''height'''}
_snake_case : List[Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'''original_image'''} )
_snake_case : List[str] = PipelineTesterMixin.required_optional_params - {'''latents'''}
def __UpperCAmelCase ( self ) -> Optional[Any]:
return self._get_superresolution_dummy_components()
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase=0 ) -> Any:
if str(_UpperCamelCase ).startswith('mps' ):
UpperCAmelCase_ : List[Any] = torch.manual_seed(_UpperCamelCase )
else:
UpperCAmelCase_ : int = torch.Generator(device=_UpperCamelCase ).manual_seed(_UpperCamelCase )
UpperCAmelCase_ : List[Any] = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(_UpperCamelCase ) ).to(_UpperCamelCase )
UpperCAmelCase_ : Dict = floats_tensor((1, 3, 1_6, 1_6) , rng=random.Random(_UpperCamelCase ) ).to(_UpperCamelCase )
UpperCAmelCase_ : Tuple = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'original_image': original_image,
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def __UpperCAmelCase ( self ) -> Any:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def __UpperCAmelCase ( self ) -> Dict:
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' )
def __UpperCAmelCase ( self ) -> str:
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1E-1 )
def __UpperCAmelCase ( self ) -> List[Any]:
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def __UpperCAmelCase ( self ) -> Union[str, Any]:
self._test_save_load_local()
def __UpperCAmelCase ( self ) -> Dict:
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
| 29 |
def binary_and(a: int, b: int) -> str:
    """Return the bitwise AND of two non-negative integers as a binary string.

    >>> binary_and(25, 32)
    '0b000000'
    >>> binary_and(37, 50)
    '0b100000'
    """
    if a < 0 or b < 0:
        raise ValueError('the value of both inputs must be positive')
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"
    max_len = max(len(a_binary), len(b_binary))
    return '0b' + ''.join(
        str(int(char_a == '1' and char_b == '1'))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 29 | 1 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
__UpperCAmelCase = {
'tokenizer_file': {
'EleutherAI/gpt-neox-20b': 'https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json',
},
}
__UpperCAmelCase = {
'gpt-neox-20b': 2048,
}
class lowerCamelCase (_snake_case ):
'''simple docstring'''
_snake_case : str = VOCAB_FILES_NAMES
_snake_case : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
_snake_case : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_snake_case : Union[str, Any] = ['''input_ids''', '''attention_mask''']
def __init__( self , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase="<|endoftext|>" , _UpperCamelCase="<|endoftext|>" , _UpperCamelCase="<|endoftext|>" , _UpperCamelCase=False , **_UpperCamelCase , ) -> Union[str, Any]:
super().__init__(
_UpperCamelCase , _UpperCamelCase , tokenizer_file=_UpperCamelCase , unk_token=_UpperCamelCase , bos_token=_UpperCamelCase , eos_token=_UpperCamelCase , add_prefix_space=_UpperCamelCase , **_UpperCamelCase , )
UpperCAmelCase_ : List[str] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space' , _UpperCamelCase ) != add_prefix_space:
UpperCAmelCase_ : Optional[int] = getattr(_UpperCamelCase , pre_tok_state.pop('type' ) )
UpperCAmelCase_ : Optional[int] = add_prefix_space
UpperCAmelCase_ : Dict = pre_tok_class(**_UpperCamelCase )
UpperCAmelCase_ : Any = add_prefix_space
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = None ) -> Tuple[str]:
UpperCAmelCase_ : Dict = self._tokenizer.model.save(_UpperCamelCase , name=_UpperCamelCase )
return tuple(_UpperCamelCase )
def __UpperCAmelCase ( self , _UpperCamelCase ) -> List[int]:
UpperCAmelCase_ : Tuple = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(_UpperCamelCase , add_special_tokens=_UpperCamelCase ) + [self.eos_token_id] )
if len(_UpperCamelCase ) > self.model_max_length:
UpperCAmelCase_ : int = input_ids[-self.model_max_length :]
return input_ids
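# --- Illustrative usage sketch (added) ---
# The tokenizer above is a byte-level BPE with GPT-2-style `add_prefix_space`
# handling; the checkpoint id matches the pretrained map in this file.
#
# from transformers import GPTNeoXTokenizerFast
# tokenizer = GPTNeoXTokenizerFast.from_pretrained('EleutherAI/gpt-neox-20b')
# input_ids = tokenizer('hello world')['input_ids']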
| 29 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {'vocab_file': 'vocab.txt'}
__UpperCAmelCase = {
'vocab_file': {
'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt',
'YituTech/conv-bert-medium-small': (
'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'
),
'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt',
}
}
__UpperCAmelCase = {
'YituTech/conv-bert-base': 512,
'YituTech/conv-bert-medium-small': 512,
'YituTech/conv-bert-small': 512,
}
__UpperCAmelCase = {
'YituTech/conv-bert-base': {'do_lower_case': True},
'YituTech/conv-bert-medium-small': {'do_lower_case': True},
'YituTech/conv-bert-small': {'do_lower_case': True},
}
class lowerCamelCase (_snake_case ):
'''simple docstring'''
_snake_case : Optional[int] = VOCAB_FILES_NAMES
_snake_case : int = PRETRAINED_VOCAB_FILES_MAP
_snake_case : Dict = PRETRAINED_INIT_CONFIGURATION
_snake_case : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_snake_case : Any = ConvBertTokenizer
def __init__( self , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase=True , _UpperCamelCase="[UNK]" , _UpperCamelCase="[SEP]" , _UpperCamelCase="[PAD]" , _UpperCamelCase="[CLS]" , _UpperCamelCase="[MASK]" , _UpperCamelCase=True , _UpperCamelCase=None , **_UpperCamelCase , ) -> Dict:
super().__init__(
_UpperCamelCase , tokenizer_file=_UpperCamelCase , do_lower_case=_UpperCamelCase , unk_token=_UpperCamelCase , sep_token=_UpperCamelCase , pad_token=_UpperCamelCase , cls_token=_UpperCamelCase , mask_token=_UpperCamelCase , tokenize_chinese_chars=_UpperCamelCase , strip_accents=_UpperCamelCase , **_UpperCamelCase , )
UpperCAmelCase_ : Optional[int] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , _UpperCamelCase ) != do_lower_case
or normalizer_state.get('strip_accents' , _UpperCamelCase ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , _UpperCamelCase ) != tokenize_chinese_chars
):
UpperCAmelCase_ : Any = getattr(_UpperCamelCase , normalizer_state.pop('type' ) )
UpperCAmelCase_ : str = do_lower_case
UpperCAmelCase_ : List[Any] = strip_accents
UpperCAmelCase_ : str = tokenize_chinese_chars
UpperCAmelCase_ : Tuple = normalizer_class(**_UpperCamelCase )
UpperCAmelCase_ : Any = do_lower_case
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase=None ) -> List[str]:
UpperCAmelCase_ : int = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = None ) -> List[int]:
UpperCAmelCase_ : Union[str, Any] = [self.sep_token_id]
UpperCAmelCase_ : int = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = None ) -> Tuple[str]:
UpperCAmelCase_ : Any = self._tokenizer.model.save(_UpperCamelCase , name=_UpperCamelCase )
return tuple(_UpperCamelCase )
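# Worked example (added): for a sequence pair, the two methods above intend the
# layout
#   tokens:   [CLS] A ... [SEP] B ... [SEP]
#   type ids:   0   0 ...   0   1 ...   1
# i.e. segment 0 covers [CLS] + the first sequence + its [SEP], and segment 1
# covers the second sequence + the trailing [SEP].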
| 29 | 1 |
import math
def is_prime(number: int) -> bool:
    """Return True iff `number` is prime, by trial division up to sqrt(number)."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and non-negative"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or not number % 2:
        # negatives, 0, 1 and all even numbers are not primes
        return False

    odd_numbers = range(3, int(math.sqrt(number) + 1), 2)
    return not any(not number % i for i in odd_numbers)


def next_prime(value, factor=1, **kwargs):
    value = factor * value
    first_value_val = value

    while not is_prime(value):
        value += 1 if not ('desc' in kwargs and kwargs['desc'] is True) else -1

    if value == first_value_val:
        return next_prime(value + 1, **kwargs)
    return value
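# Worked examples (added), tracing the two functions above:
#   next_prime(14) -> 17              (14, 15 and 16 fail the primality test)
#   next_prime(13) -> 17              (13 is already prime, so the equality check
#                                      re-rolls from 14 to avoid returning the input)
#   next_prime(15, desc=True) -> 13   (desc=True searches downward instead)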
| 29 |
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
'snap-research/efficientformer-l1-300': (
'https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json'
),
}
class lowerCamelCase (_snake_case ):
'''simple docstring'''
_snake_case : Optional[int] = '''efficientformer'''
def __init__( self , _UpperCamelCase = [3, 2, 6, 4] , _UpperCamelCase = [4_8, 9_6, 2_2_4, 4_4_8] , _UpperCamelCase = [True, True, True, True] , _UpperCamelCase = 4_4_8 , _UpperCamelCase = 3_2 , _UpperCamelCase = 4 , _UpperCamelCase = 7 , _UpperCamelCase = 5 , _UpperCamelCase = 8 , _UpperCamelCase = 4 , _UpperCamelCase = 0.0 , _UpperCamelCase = 1_6 , _UpperCamelCase = 3 , _UpperCamelCase = 3 , _UpperCamelCase = 3 , _UpperCamelCase = 2 , _UpperCamelCase = 1 , _UpperCamelCase = 0.0 , _UpperCamelCase = 1 , _UpperCamelCase = True , _UpperCamelCase = True , _UpperCamelCase = 1E-5 , _UpperCamelCase = "gelu" , _UpperCamelCase = 0.02 , _UpperCamelCase = 1E-12 , _UpperCamelCase = 2_2_4 , _UpperCamelCase = 1E-05 , **_UpperCamelCase , ) -> None:
super().__init__(**_UpperCamelCase )
UpperCAmelCase_ : int = hidden_act
UpperCAmelCase_ : Union[str, Any] = hidden_dropout_prob
UpperCAmelCase_ : Tuple = hidden_sizes
UpperCAmelCase_ : Union[str, Any] = num_hidden_layers
UpperCAmelCase_ : List[str] = num_attention_heads
UpperCAmelCase_ : List[Any] = initializer_range
UpperCAmelCase_ : int = layer_norm_eps
UpperCAmelCase_ : List[str] = patch_size
UpperCAmelCase_ : Union[str, Any] = num_channels
UpperCAmelCase_ : Optional[Any] = depths
UpperCAmelCase_ : List[Any] = mlp_expansion_ratio
UpperCAmelCase_ : List[str] = downsamples
UpperCAmelCase_ : List[Any] = dim
UpperCAmelCase_ : Tuple = key_dim
UpperCAmelCase_ : Optional[int] = attention_ratio
UpperCAmelCase_ : str = resolution
UpperCAmelCase_ : Dict = pool_size
UpperCAmelCase_ : Union[str, Any] = downsample_patch_size
UpperCAmelCase_ : List[str] = downsample_stride
UpperCAmelCase_ : List[str] = downsample_pad
UpperCAmelCase_ : Any = drop_path_rate
UpperCAmelCase_ : Dict = num_metaad_blocks
UpperCAmelCase_ : Dict = distillation
UpperCAmelCase_ : int = use_layer_scale
UpperCAmelCase_ : Any = layer_scale_init_value
UpperCAmelCase_ : Any = image_size
UpperCAmelCase_ : Dict = batch_norm_eps
| 29 | 1 |
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout the commit at https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./  # create symlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn.grep_linear': 'encoder.layers.*.attention.gru_rel_pos_linear',
'self_attn.relative_attention_bias': 'encoder.layers.*.attention.rel_attn_embed',
'self_attn.grep_a': 'encoder.layers.*.attention.gru_rel_pos_const',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'ctc_proj',
'mask_emb': 'masked_spec_embed',
}
__UpperCAmelCase = [
'ctc_proj',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def lowercase__ ( __snake_case : Optional[Any] , __snake_case : str , __snake_case : Optional[Any] , __snake_case : Union[str, Any] , __snake_case : Optional[Any] ):
'''simple docstring'''
for attribute in key.split('.' ):
UpperCAmelCase_ : List[Any] = getattr(__snake_case , __snake_case )
if weight_type is not None:
UpperCAmelCase_ : Any = getattr(__snake_case , __snake_case ).shape
else:
UpperCAmelCase_ : Optional[int] = hf_pointer.shape
assert hf_shape == value.shape, (
F"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
F" {value.shape} for {full_name}"
)
if weight_type == "weight":
UpperCAmelCase_ : List[str] = value
elif weight_type == "weight_g":
UpperCAmelCase_ : Optional[int] = value
elif weight_type == "weight_v":
UpperCAmelCase_ : str = value
elif weight_type == "bias":
UpperCAmelCase_ : Tuple = value
else:
UpperCAmelCase_ : Optional[Any] = value
logger.info(F"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." )
def lowercase__ ( __snake_case : Optional[int] , __snake_case : Any ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = []
UpperCAmelCase_ : List[Any] = fairseq_model.state_dict()
UpperCAmelCase_ : int = hf_model.feature_extractor
for name, value in fairseq_dict.items():
UpperCAmelCase_ : Any = False
if "conv_layers" in name:
load_conv_layer(
__snake_case , __snake_case , __snake_case , __snake_case , hf_model.config.feat_extract_norm == 'group' , )
UpperCAmelCase_ : int = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
UpperCAmelCase_ : Optional[Any] = True
if "*" in mapped_key:
UpperCAmelCase_ : int = name.split(__snake_case )[0].split('.' )[-2]
UpperCAmelCase_ : Dict = mapped_key.replace('*' , __snake_case )
if "weight_g" in name:
UpperCAmelCase_ : Optional[Any] = 'weight_g'
elif "weight_v" in name:
UpperCAmelCase_ : List[Any] = 'weight_v'
elif "bias" in name and "relative_attention_bias" not in name:
UpperCAmelCase_ : int = 'bias'
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
UpperCAmelCase_ : Optional[int] = 'weight'
else:
UpperCAmelCase_ : Union[str, Any] = None
set_recursively(__snake_case , __snake_case , __snake_case , __snake_case , __snake_case )
continue
if not is_used:
unused_weights.append(__snake_case )
logger.warning(F"Unused weights: {unused_weights}" )
def lowercase__ ( __snake_case : Any , __snake_case : Tuple , __snake_case : List[str] , __snake_case : Union[str, Any] , __snake_case : List[Any] ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = full_name.split('conv_layers.' )[-1]
UpperCAmelCase_ : List[Any] = name.split('.' )
UpperCAmelCase_ : Dict = int(items[0] )
UpperCAmelCase_ : Optional[int] = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
)
UpperCAmelCase_ : int = value
logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
)
UpperCAmelCase_ : str = value
logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
" found."
)
UpperCAmelCase_ : Optional[Any] = value
logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F"{full_name} has size {value.shape}, but"
F" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
)
UpperCAmelCase_ : List[Any] = value
logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
else:
unused_weights.append(__snake_case )
@torch.no_grad()
def lowercase__ ( __snake_case : Union[str, Any] , __snake_case : Optional[int] , __snake_case : Tuple=None ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = torch.load(__snake_case )
UpperCAmelCase_ : List[Any] = WavLMConfigOrig(checkpoint['cfg'] )
UpperCAmelCase_ : Union[str, Any] = WavLMOrig(__snake_case )
model.load_state_dict(checkpoint['model'] )
model.eval()
if config_path is not None:
UpperCAmelCase_ : Optional[Any] = WavLMConfig.from_pretrained(__snake_case )
else:
UpperCAmelCase_ : Union[str, Any] = WavLMConfig()
UpperCAmelCase_ : Tuple = WavLMModel(__snake_case )
recursively_load_weights(__snake_case , __snake_case )
hf_wavlm.save_pretrained(__snake_case )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
__UpperCAmelCase = parser.parse_args()
convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
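# Hedged invocation sketch (file and path names are placeholders, not from this script):
#   python convert_wavlm_checkpoint.py \
#       --checkpoint_path ./WavLM-Base.pt \
#       --pytorch_dump_folder_path ./wavlm-base-hf \
#       --config_path ./config.json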
| 29 |
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
__UpperCAmelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
__UpperCAmelCase = '\n Examples:\n ```py\n >>> from PIL import Image\n >>> import torch\n >>> from diffusers import DiffusionPipeline\n >>> from diffusers.utils import export_to_gif, load_image\n\n >>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")\n\n >>> repo = "openai/shap-e-img2img"\n >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)\n >>> pipe = pipe.to(device)\n\n >>> guidance_scale = 3.0\n >>> image_url = "https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png"\n >>> image = load_image(image_url).convert("RGB")\n\n >>> images = pipe(\n ... image,\n ... guidance_scale=guidance_scale,\n ... num_inference_steps=64,\n ... frame_size=256,\n ... ).images\n\n >>> gif_path = export_to_gif(images[0], "corgi_3d.gif")\n ```\n'
@dataclass
class lowerCamelCase (_snake_case ):
'''simple docstring'''
_snake_case : Union[PIL.Image.Image, np.ndarray]
class lowerCamelCase (_snake_case ):
'''simple docstring'''
def __init__( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , ) -> Any:
super().__init__()
self.register_modules(
prior=_UpperCamelCase , image_encoder=_UpperCamelCase , image_processor=_UpperCamelCase , scheduler=_UpperCamelCase , renderer=_UpperCamelCase , )
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> List[Any]:
if latents is None:
UpperCAmelCase_ : str = randn_tensor(_UpperCamelCase , generator=_UpperCamelCase , device=_UpperCamelCase , dtype=_UpperCamelCase )
else:
if latents.shape != shape:
raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}" )
UpperCAmelCase_ : Tuple = latents.to(_UpperCamelCase )
UpperCAmelCase_ : Tuple = latents * scheduler.init_noise_sigma
return latents
def __UpperCAmelCase ( self , _UpperCamelCase=0 ) -> Union[str, Any]:
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`' )
UpperCAmelCase_ : int = torch.device(f"cuda:{gpu_id}" )
UpperCAmelCase_ : int = [self.image_encoder, self.prior]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(_UpperCamelCase , _UpperCamelCase )
@property
def __UpperCAmelCase ( self ) -> int:
if self.device != torch.device('meta' ) or not hasattr(self.image_encoder , '_hf_hook' ):
return self.device
for module in self.image_encoder.modules():
if (
hasattr(_UpperCamelCase , '_hf_hook' )
and hasattr(module._hf_hook , 'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , ) -> str:
if isinstance(_UpperCamelCase , _UpperCamelCase ) and isinstance(image[0] , torch.Tensor ):
UpperCAmelCase_ : int = torch.cat(_UpperCamelCase , axis=0 ) if image[0].ndim == 4 else torch.stack(_UpperCamelCase , axis=0 )
if not isinstance(_UpperCamelCase , torch.Tensor ):
UpperCAmelCase_ : Optional[int] = self.image_processor(_UpperCamelCase , return_tensors='pt' ).pixel_values[0].unsqueeze(0 )
UpperCAmelCase_ : Tuple = image.to(dtype=self.image_encoder.dtype , device=_UpperCamelCase )
UpperCAmelCase_ : Optional[Any] = self.image_encoder(_UpperCamelCase )['last_hidden_state']
UpperCAmelCase_ : Union[str, Any] = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256
UpperCAmelCase_ : List[str] = image_embeds.repeat_interleave(_UpperCamelCase , dim=0 )
if do_classifier_free_guidance:
UpperCAmelCase_ : Dict = torch.zeros_like(_UpperCamelCase )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
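            # After this concat, rows [0, batch) hold the zeroed (unconditional)
            # embeddings and rows [batch, 2 * batch) hold the real image embeddings.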
UpperCAmelCase_ : Optional[int] = torch.cat([negative_image_embeds, image_embeds] )
return image_embeds
@torch.no_grad()
@replace_example_docstring(_UpperCamelCase )
def __call__( self , _UpperCamelCase , _UpperCamelCase = 1 , _UpperCamelCase = 2_5 , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = 4.0 , _UpperCamelCase = 6_4 , _UpperCamelCase = "pil" , _UpperCamelCase = True , ) -> Union[str, Any]:
if isinstance(_UpperCamelCase , PIL.Image.Image ):
UpperCAmelCase_ : Tuple = 1
elif isinstance(_UpperCamelCase , torch.Tensor ):
UpperCAmelCase_ : str = image.shape[0]
elif isinstance(_UpperCamelCase , _UpperCamelCase ) and isinstance(image[0] , (torch.Tensor, PIL.Image.Image) ):
UpperCAmelCase_ : Optional[int] = len(_UpperCamelCase )
else:
raise ValueError(
f"`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(_UpperCamelCase )}" )
UpperCAmelCase_ : Tuple = self._execution_device
UpperCAmelCase_ : str = batch_size * num_images_per_prompt
UpperCAmelCase_ : str = guidance_scale > 1.0
UpperCAmelCase_ : str = self._encode_image(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# prior
self.scheduler.set_timesteps(_UpperCamelCase , device=_UpperCamelCase )
UpperCAmelCase_ : int = self.scheduler.timesteps
UpperCAmelCase_ : int = self.prior.config.num_embeddings
UpperCAmelCase_ : Any = self.prior.config.embedding_dim
UpperCAmelCase_ : List[str] = self.prepare_latents(
(batch_size, num_embeddings * embedding_dim) , image_embeds.dtype , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , self.scheduler , )
# YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
UpperCAmelCase_ : List[Any] = latents.reshape(latents.shape[0] , _UpperCamelCase , _UpperCamelCase )
for i, t in enumerate(self.progress_bar(_UpperCamelCase ) ):
# expand the latents if we are doing classifier free guidance
UpperCAmelCase_ : Tuple = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
UpperCAmelCase_ : Optional[Any] = self.scheduler.scale_model_input(_UpperCamelCase , _UpperCamelCase )
UpperCAmelCase_ : int = self.prior(
_UpperCamelCase , timestep=_UpperCamelCase , proj_embedding=_UpperCamelCase , ).predicted_image_embedding
# remove the variance
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = noise_pred.split(
scaled_model_input.shape[2] , dim=2 ) # batch_size, num_embeddings, embedding_dim
            if do_classifier_free_guidance:
UpperCAmelCase_ , UpperCAmelCase_ : str = noise_pred.chunk(2 )
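                # Classifier-free guidance: extrapolate from the unconditional prediction
                # toward the conditional one; guidance_scale > 1 strengthens conditioning.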
UpperCAmelCase_ : List[Any] = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
UpperCAmelCase_ : List[str] = self.scheduler.step(
_UpperCamelCase , timestep=_UpperCamelCase , sample=_UpperCamelCase , ).prev_sample
if output_type == "latent":
return ShapEPipelineOutput(images=_UpperCamelCase )
UpperCAmelCase_ : List[Any] = []
for i, latent in enumerate(_UpperCamelCase ):
UpperCAmelCase_ : List[str] = self.renderer.decode(
latent[None, :] , _UpperCamelCase , size=_UpperCamelCase , ray_batch_size=4_0_9_6 , n_coarse_samples=6_4 , n_fine_samples=1_2_8 , )
images.append(_UpperCamelCase )
UpperCAmelCase_ : Optional[int] = torch.stack(_UpperCamelCase )
if output_type not in ["np", "pil"]:
raise ValueError(f"Only the output types `pil` and `np` are supported not output_type={output_type}" )
UpperCAmelCase_ : Dict = images.cpu().numpy()
if output_type == "pil":
UpperCAmelCase_ : List[str] = [self.numpy_to_pil(_UpperCamelCase ) for image in images]
# Offload last model to CPU
if hasattr(self , 'final_offload_hook' ) and self.final_offload_hook is not None:
self.final_offload_hook.offload()
if not return_dict:
return (images,)
return ShapEPipelineOutput(images=_UpperCamelCase )
| 29 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__UpperCAmelCase = {
'configuration_maskformer': ['MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MaskFormerConfig'],
'configuration_maskformer_swin': ['MaskFormerSwinConfig'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ['MaskFormerFeatureExtractor']
__UpperCAmelCase = ['MaskFormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'MaskFormerForInstanceSegmentation',
'MaskFormerModel',
'MaskFormerPreTrainedModel',
]
__UpperCAmelCase = [
'MaskFormerSwinBackbone',
'MaskFormerSwinModel',
'MaskFormerSwinPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
from .configuration_maskformer_swin import MaskFormerSwinConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_maskformer import MaskFormerFeatureExtractor
from .image_processing_maskformer import MaskFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskformer import (
MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskFormerForInstanceSegmentation,
MaskFormerModel,
MaskFormerPreTrainedModel,
)
from .modeling_maskformer_swin import (
MaskFormerSwinBackbone,
MaskFormerSwinModel,
MaskFormerSwinPreTrainedModel,
)
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure)
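# Illustrative note on the _LazyModule pattern above (a usage sketch, not part of this file):
# importing the package stays cheap because torch-backed symbols such as
# MaskFormerModel are only resolved on first attribute access, while static type
# checkers still see the real imports via the TYPE_CHECKING branch.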
| 29 |
import random
import unittest
import torch
from diffusers import IFImgaImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class lowerCamelCase (_snake_case , _snake_case , unittest.TestCase ):
'''simple docstring'''
_snake_case : Union[str, Any] = IFImgaImgSuperResolutionPipeline
_snake_case : Union[str, Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''width''', '''height'''}
_snake_case : List[Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'''original_image'''} )
_snake_case : List[str] = PipelineTesterMixin.required_optional_params - {'''latents'''}
def __UpperCAmelCase ( self ) -> Optional[Any]:
return self._get_superresolution_dummy_components()
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase=0 ) -> Any:
if str(_UpperCamelCase ).startswith('mps' ):
UpperCAmelCase_ : List[Any] = torch.manual_seed(_UpperCamelCase )
else:
UpperCAmelCase_ : int = torch.Generator(device=_UpperCamelCase ).manual_seed(_UpperCamelCase )
UpperCAmelCase_ : List[Any] = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(_UpperCamelCase ) ).to(_UpperCamelCase )
UpperCAmelCase_ : Dict = floats_tensor((1, 3, 1_6, 1_6) , rng=random.Random(_UpperCamelCase ) ).to(_UpperCamelCase )
UpperCAmelCase_ : Tuple = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'original_image': original_image,
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def __UpperCAmelCase ( self ) -> Any:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def __UpperCAmelCase ( self ) -> Dict:
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' )
def __UpperCAmelCase ( self ) -> str:
        # Due to non-determinism in save/load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1E-1 )
def __UpperCAmelCase ( self ) -> List[Any]:
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def __UpperCAmelCase ( self ) -> Union[str, Any]:
self._test_save_load_local()
def __UpperCAmelCase ( self ) -> Dict:
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
| 29 | 1 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__UpperCAmelCase = {
'configuration_vivit': ['VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'VivitConfig'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ['VivitImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'VivitModel',
'VivitPreTrainedModel',
'VivitForVideoClassification',
]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 29 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__UpperCAmelCase = {
'configuration_time_series_transformer': [
'TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'TimeSeriesTransformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TimeSeriesTransformerForPrediction',
'TimeSeriesTransformerModel',
'TimeSeriesTransformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 29 | 1 |
import warnings
from typing import Dict
import numpy as np
from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
def lowercase__ ( __snake_case : Dict ):
'''simple docstring'''
return 1.0 / (1.0 + np.exp(-_outputs ))
def lowercase__ ( __snake_case : Optional[int] ):
'''simple docstring'''
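    # Subtracting the per-row max before exponentiating avoids overflow in np.exp;
    # softmax is shift-invariant, so the result is unchanged.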
UpperCAmelCase_ : Optional[int] = np.max(_outputs , axis=-1 , keepdims=__snake_case )
UpperCAmelCase_ : Optional[Any] = np.exp(_outputs - maxes )
return shifted_exp / shifted_exp.sum(axis=-1 , keepdims=__snake_case )
class lowerCamelCase (_snake_case ):
'''simple docstring'''
_snake_case : str = '''sigmoid'''
_snake_case : Union[str, Any] = '''softmax'''
_snake_case : Tuple = '''none'''
@add_end_docstrings(
_snake_case , R'''
return_all_scores (`bool`, *optional*, defaults to `False`):
Whether to return all prediction scores or just the one of the predicted class.
function_to_apply (`str`, *optional*, defaults to `"default"`):
The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:
- `"default"`: if the model has a single label, will apply the sigmoid function on the output. If the model
has several labels, will apply the softmax function on the output.
- `"sigmoid"`: Applies the sigmoid function on the output.
- `"softmax"`: Applies the softmax function on the output.
- `"none"`: Does not apply any function on the output.
''' , )
class lowerCamelCase (_snake_case ):
'''simple docstring'''
_snake_case : str = False
_snake_case : Dict = ClassificationFunction.NONE
def __init__( self , **_UpperCamelCase ) -> Tuple:
super().__init__(**_UpperCamelCase )
self.check_model_type(
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if self.framework == 'tf'
else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING )
def __UpperCAmelCase ( self , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase="" , **_UpperCamelCase ) -> Dict:
# Using "" as default argument because we're going to use `top_k=None` in user code to declare
# "No top_k"
UpperCAmelCase_ : Dict = tokenizer_kwargs
UpperCAmelCase_ : Any = {}
if hasattr(self.model.config , 'return_all_scores' ) and return_all_scores is None:
UpperCAmelCase_ : Any = self.model.config.return_all_scores
if isinstance(_UpperCamelCase , _UpperCamelCase ) or top_k is None:
UpperCAmelCase_ : str = top_k
UpperCAmelCase_ : int = False
elif return_all_scores is not None:
warnings.warn(
                '`return_all_scores` is now deprecated, if you want similar functionality use `top_k=None` instead of'
                ' `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.' , _UpperCamelCase , )
if return_all_scores:
UpperCAmelCase_ : List[Any] = None
else:
UpperCAmelCase_ : Optional[Any] = 1
if isinstance(_UpperCamelCase , _UpperCamelCase ):
UpperCAmelCase_ : Dict = ClassificationFunction[function_to_apply.upper()]
if function_to_apply is not None:
UpperCAmelCase_ : Union[str, Any] = function_to_apply
return preprocess_params, {}, postprocess_params
def __call__( self , *_UpperCamelCase , **_UpperCamelCase ) -> List[Any]:
UpperCAmelCase_ : Tuple = super().__call__(*_UpperCamelCase , **_UpperCamelCase )
# TODO try and retrieve it in a nicer way from _sanitize_parameters.
UpperCAmelCase_ : List[str] = 'top_k' not in kwargs
if isinstance(args[0] , _UpperCamelCase ) and _legacy:
# This pipeline is odd, and return a list when single item is run
return [result]
else:
return result
def __UpperCAmelCase ( self , _UpperCamelCase , **_UpperCamelCase ) -> Dict[str, GenericTensor]:
UpperCAmelCase_ : int = self.framework
if isinstance(_UpperCamelCase , _UpperCamelCase ):
return self.tokenizer(**_UpperCamelCase , return_tensors=_UpperCamelCase , **_UpperCamelCase )
elif isinstance(_UpperCamelCase , _UpperCamelCase ) and len(_UpperCamelCase ) == 1 and isinstance(inputs[0] , _UpperCamelCase ) and len(inputs[0] ) == 2:
# It used to be valid to use a list of list of list for text pairs, keeping this path for BC
return self.tokenizer(
text=inputs[0][0] , text_pair=inputs[0][1] , return_tensors=_UpperCamelCase , **_UpperCamelCase )
elif isinstance(_UpperCamelCase , _UpperCamelCase ):
# This is likely an invalid usage of the pipeline attempting to pass text pairs.
raise ValueError(
'The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a'
' dictionary `{"text": "My text", "text_pair": "My pair"}` in order to send a text pair.' )
return self.tokenizer(_UpperCamelCase , return_tensors=_UpperCamelCase , **_UpperCamelCase )
def __UpperCAmelCase ( self , _UpperCamelCase ) -> Tuple:
return self.model(**_UpperCamelCase )
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase=None , _UpperCamelCase=1 , _UpperCamelCase=True ) -> str:
# `_legacy` is used to determine if we're running the naked pipeline and in backward
# compatibility mode, or if running the pipeline with `pipeline(..., top_k=1)` we're running
# the more natural result containing the list.
# Default value before `set_parameters`
if function_to_apply is None:
if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1:
UpperCAmelCase_ : Optional[int] = ClassificationFunction.SIGMOID
elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1:
UpperCAmelCase_ : Any = ClassificationFunction.SOFTMAX
elif hasattr(self.model.config , 'function_to_apply' ) and function_to_apply is None:
UpperCAmelCase_ : List[Any] = self.model.config.function_to_apply
else:
UpperCAmelCase_ : Optional[int] = ClassificationFunction.NONE
UpperCAmelCase_ : Union[str, Any] = model_outputs['logits'][0]
UpperCAmelCase_ : Optional[int] = outputs.numpy()
if function_to_apply == ClassificationFunction.SIGMOID:
UpperCAmelCase_ : int = sigmoid(_UpperCamelCase )
elif function_to_apply == ClassificationFunction.SOFTMAX:
UpperCAmelCase_ : Optional[int] = softmax(_UpperCamelCase )
elif function_to_apply == ClassificationFunction.NONE:
UpperCAmelCase_ : List[str] = outputs
else:
raise ValueError(f"Unrecognized `function_to_apply` argument: {function_to_apply}" )
if top_k == 1 and _legacy:
return {"label": self.model.config.idalabel[scores.argmax().item()], "score": scores.max().item()}
UpperCAmelCase_ : Union[str, Any] = [
{'label': self.model.config.idalabel[i], 'score': score.item()} for i, score in enumerate(_UpperCamelCase )
]
if not _legacy:
dict_scores.sort(key=lambda _UpperCamelCase : x["score"] , reverse=_UpperCamelCase )
if top_k is not None:
UpperCAmelCase_ : Optional[Any] = dict_scores[:top_k]
return dict_scores
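# Hedged usage sketch (the default model download is illustrative, not taken from this file):
#   from transformers import pipeline
#   classifier = pipeline("text-classification")
#   classifier("This movie was great!", top_k=None)   # scores for every label
#   classifier({"text": "premise", "text_pair": "hypothesis"})  # text-pair input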
| 29 |
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
'tensor(bool)': np.bool_,
'tensor(int8)': np.inta,
'tensor(uint8)': np.uinta,
'tensor(int16)': np.intaa,
'tensor(uint16)': np.uintaa,
'tensor(int32)': np.intaa,
'tensor(uint32)': np.uintaa,
'tensor(int64)': np.intaa,
'tensor(uint64)': np.uintaa,
'tensor(float16)': np.floataa,
'tensor(float)': np.floataa,
'tensor(double)': np.floataa,
}
class lowerCamelCase :
'''simple docstring'''
def __init__( self , _UpperCamelCase=None , **_UpperCamelCase ) -> Dict:
logger.info('`diffusers.OnnxRuntimeModel` is experimental and might change in the future.' )
UpperCAmelCase_ : Any = model
UpperCAmelCase_ : int = kwargs.get('model_save_dir' , _UpperCamelCase )
UpperCAmelCase_ : List[Any] = kwargs.get('latest_model_name' , _UpperCamelCase )
def __call__( self , **_UpperCamelCase ) -> str:
UpperCAmelCase_ : Optional[int] = {k: np.array(_UpperCamelCase ) for k, v in kwargs.items()}
return self.model.run(_UpperCamelCase , _UpperCamelCase )
@staticmethod
def __UpperCAmelCase ( _UpperCamelCase , _UpperCamelCase=None , _UpperCamelCase=None ) -> List[Any]:
if provider is None:
logger.info('No onnxruntime provider specified, using CPUExecutionProvider' )
UpperCAmelCase_ : List[str] = 'CPUExecutionProvider'
return ort.InferenceSession(_UpperCamelCase , providers=[provider] , sess_options=_UpperCamelCase )
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = None , **_UpperCamelCase ) -> Dict:
UpperCAmelCase_ : Any = file_name if file_name is not None else ONNX_WEIGHTS_NAME
UpperCAmelCase_ : Optional[Any] = self.model_save_dir.joinpath(self.latest_model_name )
UpperCAmelCase_ : str = Path(_UpperCamelCase ).joinpath(_UpperCamelCase )
try:
shutil.copyfile(_UpperCamelCase , _UpperCamelCase )
except shutil.SameFileError:
pass
# copy external weights (for models >2GB)
UpperCAmelCase_ : Optional[Any] = self.model_save_dir.joinpath(_UpperCamelCase )
if src_path.exists():
UpperCAmelCase_ : List[Any] = Path(_UpperCamelCase ).joinpath(_UpperCamelCase )
try:
shutil.copyfile(_UpperCamelCase , _UpperCamelCase )
except shutil.SameFileError:
pass
def __UpperCAmelCase ( self , _UpperCamelCase , **_UpperCamelCase , ) -> List[str]:
if os.path.isfile(_UpperCamelCase ):
logger.error(f"Provided path ({save_directory}) should be a directory, not a file" )
return
os.makedirs(_UpperCamelCase , exist_ok=_UpperCamelCase )
# saving model weights/files
self._save_pretrained(_UpperCamelCase , **_UpperCamelCase )
@classmethod
def __UpperCAmelCase ( cls , _UpperCamelCase , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = False , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , **_UpperCamelCase , ) -> List[str]:
UpperCAmelCase_ : List[str] = file_name if file_name is not None else ONNX_WEIGHTS_NAME
# load model from local directory
if os.path.isdir(_UpperCamelCase ):
UpperCAmelCase_ : Union[str, Any] = OnnxRuntimeModel.load_model(
os.path.join(_UpperCamelCase , _UpperCamelCase ) , provider=_UpperCamelCase , sess_options=_UpperCamelCase )
UpperCAmelCase_ : Tuple = Path(_UpperCamelCase )
# load model from hub
else:
# download model
UpperCAmelCase_ : List[str] = hf_hub_download(
repo_id=_UpperCamelCase , filename=_UpperCamelCase , use_auth_token=_UpperCamelCase , revision=_UpperCamelCase , cache_dir=_UpperCamelCase , force_download=_UpperCamelCase , )
UpperCAmelCase_ : Union[str, Any] = Path(_UpperCamelCase ).parent
UpperCAmelCase_ : List[str] = Path(_UpperCamelCase ).name
UpperCAmelCase_ : Union[str, Any] = OnnxRuntimeModel.load_model(_UpperCamelCase , provider=_UpperCamelCase , sess_options=_UpperCamelCase )
return cls(model=_UpperCamelCase , **_UpperCamelCase )
@classmethod
def __UpperCAmelCase ( cls , _UpperCamelCase , _UpperCamelCase = True , _UpperCamelCase = None , _UpperCamelCase = None , **_UpperCamelCase , ) -> Optional[int]:
UpperCAmelCase_ : List[str] = None
if len(str(_UpperCamelCase ).split('@' ) ) == 2:
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = model_id.split('@' )
return cls._from_pretrained(
model_id=_UpperCamelCase , revision=_UpperCamelCase , cache_dir=_UpperCamelCase , force_download=_UpperCamelCase , use_auth_token=_UpperCamelCase , **_UpperCamelCase , )
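# Hedged usage sketch (repo id and file name are placeholders):
#   model = OnnxRuntimeModel.from_pretrained(
#       "org/onnx-repo", file_name="model.onnx", provider="CPUExecutionProvider"
#   )
#   outputs = model(input_ids=np.ones((1, 8), dtype=np.int64))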
| 29 | 1 |
import numpy as np
from cva import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filteraD, imread, imshow, waitKey
def lowercase__ ( __snake_case : int , __snake_case : int , __snake_case : int , __snake_case : int , __snake_case : int , __snake_case : int ):
'''simple docstring'''
if (ksize % 2) == 0:
UpperCAmelCase_ : Optional[Any] = ksize + 1
UpperCAmelCase_ : Optional[int] = np.zeros((ksize, ksize) , dtype=np.floataa )
    # compute the kernel value at each (x, y) offset
for y in range(__snake_case ):
for x in range(__snake_case ):
# distance from center
UpperCAmelCase_ : Dict = x - ksize // 2
UpperCAmelCase_ : str = y - ksize // 2
# degree to radiant
UpperCAmelCase_ : Union[str, Any] = theta / 180 * np.pi
UpperCAmelCase_ : Any = np.cos(_theta )
UpperCAmelCase_ : Tuple = np.sin(_theta )
# get kernel x
UpperCAmelCase_ : Any = cos_theta * px + sin_theta * py
# get kernel y
UpperCAmelCase_ : str = -sin_theta * px + cos_theta * py
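            # Gabor response at this offset:
            #   exp(-(x'^2 + gamma^2 * y'^2) / (2 * sigma^2)) * cos(2 * pi * x' / lambd + psi)
            # where (x', y') are the rotated coordinates computed above.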
# fill kernel
UpperCAmelCase_ : List[str] = np.exp(
-(_x**2 + gamma**2 * _y**2) / (2 * sigma**2) ) * np.cos(2 * np.pi * _x / lambd + psi )
return gabor
if __name__ == "__main__":
import doctest
doctest.testmod()
# read original image
__UpperCAmelCase = imread('../image_data/lena.jpg')
# turn image in gray scale value
__UpperCAmelCase = cvtColor(img, COLOR_BGR2GRAY)
# Apply multiple Kernel to detect edges
__UpperCAmelCase = np.zeros(gray.shape[:2])
for theta in [0, 30, 60, 90, 120, 150]:
__UpperCAmelCase = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
out += filteraD(gray, CV_8UC3, kernel_aa)
__UpperCAmelCase = out / out.max() * 255
__UpperCAmelCase = out.astype(np.uinta)
imshow('Original', gray)
imshow('Gabor filter with 20x20 mask and 6 directions', out)
waitKey(0)
| 29 |
import contextlib
import csv
import json
import os
import sqlitea
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
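# The session-scoped fixtures below each materialize a small on-disk artifact
# (text/CSV/JSON/Parquet/SQLite files and assorted archives) once per test session
# and return its path, so tests can reuse the files without rebuilding them.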
@pytest.fixture(scope='session' )
def lowercase__ ( ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = 10
UpperCAmelCase_ : Tuple = datasets.Features(
{
'tokens': datasets.Sequence(datasets.Value('string' ) ),
'labels': datasets.Sequence(datasets.ClassLabel(names=['negative', 'positive'] ) ),
'answers': datasets.Sequence(
{
'text': datasets.Value('string' ),
'answer_start': datasets.Value('int32' ),
} ),
'id': datasets.Value('int64' ),
} )
UpperCAmelCase_ : Tuple = datasets.Dataset.from_dict(
{
'tokens': [['foo'] * 5] * n,
'labels': [[1] * 5] * n,
'answers': [{'answer_start': [97], 'text': ['1976']}] * 10,
'id': list(range(__snake_case ) ),
} , features=__snake_case , )
return dataset
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Optional[Any] , __snake_case : List[str] ):
'''simple docstring'''
UpperCAmelCase_ : str = str(tmp_path_factory.mktemp('data' ) / 'file.arrow' )
dataset.map(cache_file_name=__snake_case )
return filename
# FILE_CONTENT + files
__UpperCAmelCase = '\\n Text data.\n Second line of data.'
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = tmp_path_factory.mktemp('data' ) / 'file.txt'
UpperCAmelCase_ : Tuple = FILE_CONTENT
with open(__snake_case , 'w' ) as f:
f.write(__snake_case )
return filename
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : List[str] ):
'''simple docstring'''
import bza
UpperCAmelCase_ : Optional[int] = tmp_path_factory.mktemp('data' ) / 'file.txt.bz2'
UpperCAmelCase_ : str = bytes(__snake_case , 'utf-8' )
with bza.open(__snake_case , 'wb' ) as f:
f.write(__snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Any ):
'''simple docstring'''
import gzip
UpperCAmelCase_ : Optional[Any] = str(tmp_path_factory.mktemp('data' ) / 'file.txt.gz' )
UpperCAmelCase_ : Dict = bytes(__snake_case , 'utf-8' )
with gzip.open(__snake_case , 'wb' ) as f:
f.write(__snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Dict ):
'''simple docstring'''
if datasets.config.LZ4_AVAILABLE:
import lza.frame
UpperCAmelCase_ : Any = tmp_path_factory.mktemp('data' ) / 'file.txt.lz4'
UpperCAmelCase_ : Any = bytes(__snake_case , 'utf-8' )
with lza.frame.open(__snake_case , 'wb' ) as f:
f.write(__snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Tuple , __snake_case : List[Any] ):
'''simple docstring'''
if datasets.config.PY7ZR_AVAILABLE:
import pyazr
UpperCAmelCase_ : Optional[Any] = tmp_path_factory.mktemp('data' ) / 'file.txt.7z'
with pyazr.SevenZipFile(__snake_case , 'w' ) as archive:
archive.write(__snake_case , arcname=os.path.basename(__snake_case ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : List[str] , __snake_case : List[Any] ):
'''simple docstring'''
import tarfile
UpperCAmelCase_ : Any = tmp_path_factory.mktemp('data' ) / 'file.txt.tar'
with tarfile.TarFile(__snake_case , 'w' ) as f:
f.add(__snake_case , arcname=os.path.basename(__snake_case ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : str ):
'''simple docstring'''
import lzma
UpperCAmelCase_ : Union[str, Any] = tmp_path_factory.mktemp('data' ) / 'file.txt.xz'
UpperCAmelCase_ : Any = bytes(__snake_case , 'utf-8' )
with lzma.open(__snake_case , 'wb' ) as f:
f.write(__snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Optional[int] , __snake_case : Optional[Any] ):
'''simple docstring'''
import zipfile
UpperCAmelCase_ : int = tmp_path_factory.mktemp('data' ) / 'file.txt.zip'
with zipfile.ZipFile(__snake_case , 'w' ) as f:
f.write(__snake_case , arcname=os.path.basename(__snake_case ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Optional[Any] ):
'''simple docstring'''
if datasets.config.ZSTANDARD_AVAILABLE:
import zstandard as zstd
UpperCAmelCase_ : Tuple = tmp_path_factory.mktemp('data' ) / 'file.txt.zst'
UpperCAmelCase_ : List[str] = bytes(__snake_case , 'utf-8' )
with zstd.open(__snake_case , 'wb' ) as f:
f.write(__snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Any ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = tmp_path_factory.mktemp('data' ) / 'file.xml'
UpperCAmelCase_ : List[Any] = textwrap.dedent(
'\\n <?xml version="1.0" encoding="UTF-8" ?>\n <tmx version="1.4">\n <header segtype="sentence" srclang="ca" />\n <body>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 1</seg></tuv>\n <tuv xml:lang="en"><seg>Content 1</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 2</seg></tuv>\n <tuv xml:lang="en"><seg>Content 2</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 3</seg></tuv>\n <tuv xml:lang="en"><seg>Content 3</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 4</seg></tuv>\n <tuv xml:lang="en"><seg>Content 4</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 5</seg></tuv>\n <tuv xml:lang="en"><seg>Content 5</seg></tuv>\n </tu>\n </body>\n </tmx>' )
with open(__snake_case , 'w' ) as f:
f.write(__snake_case )
return filename
__UpperCAmelCase = [
{'col_1': '0', 'col_2': 0, 'col_3': 0.0},
{'col_1': '1', 'col_2': 1, 'col_3': 1.0},
{'col_1': '2', 'col_2': 2, 'col_3': 2.0},
{'col_1': '3', 'col_2': 3, 'col_3': 3.0},
]
__UpperCAmelCase = [
{'col_1': '4', 'col_2': 4, 'col_3': 4.0},
{'col_1': '5', 'col_2': 5, 'col_3': 5.0},
]
__UpperCAmelCase = {
'col_1': ['0', '1', '2', '3'],
'col_2': [0, 1, 2, 3],
'col_3': [0.0, 1.0, 2.0, 3.0],
}
__UpperCAmelCase = [
{'col_3': 0.0, 'col_1': '0', 'col_2': 0},
{'col_3': 1.0, 'col_1': '1', 'col_2': 1},
]
__UpperCAmelCase = [
{'col_1': 's0', 'col_2': 0, 'col_3': 0.0},
{'col_1': 's1', 'col_2': 1, 'col_3': 1.0},
{'col_1': 's2', 'col_2': 2, 'col_3': 2.0},
{'col_1': 's3', 'col_2': 3, 'col_3': 3.0},
]
@pytest.fixture(scope='session' )
def lowercase__ ( ):
'''simple docstring'''
return DATA_DICT_OF_LISTS
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : int ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = datasets.Dataset.from_dict(__snake_case )
UpperCAmelCase_ : Optional[Any] = str(tmp_path_factory.mktemp('data' ) / 'dataset.arrow' )
dataset.map(cache_file_name=__snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : List[Any] ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = str(tmp_path_factory.mktemp('data' ) / 'dataset.sqlite' )
with contextlib.closing(sqlitea.connect(__snake_case ) ) as con:
UpperCAmelCase_ : List[Any] = con.cursor()
cur.execute('CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)' )
for item in DATA:
cur.execute('INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)' , tuple(item.values() ) )
con.commit()
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = str(tmp_path_factory.mktemp('data' ) / 'dataset.csv' )
with open(__snake_case , 'w' , newline='' ) as f:
UpperCAmelCase_ : Tuple = csv.DictWriter(__snake_case , fieldnames=['col_1', 'col_2', 'col_3'] )
writer.writeheader()
for item in DATA:
writer.writerow(__snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = str(tmp_path_factory.mktemp('data' ) / 'dataset2.csv' )
with open(__snake_case , 'w' , newline='' ) as f:
UpperCAmelCase_ : Optional[Any] = csv.DictWriter(__snake_case , fieldnames=['col_1', 'col_2', 'col_3'] )
writer.writeheader()
for item in DATA:
writer.writerow(__snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : str , __snake_case : Any ):
'''simple docstring'''
import bza
UpperCAmelCase_ : int = tmp_path_factory.mktemp('data' ) / 'dataset.csv.bz2'
with open(__snake_case , 'rb' ) as f:
UpperCAmelCase_ : int = f.read()
# data = bytes(FILE_CONTENT, "utf-8")
with bza.open(__snake_case , 'wb' ) as f:
f.write(__snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : List[str] , __snake_case : Tuple , __snake_case : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = tmp_path_factory.mktemp('data' ) / 'dataset.csv.zip'
with zipfile.ZipFile(__snake_case , 'w' ) as f:
f.write(__snake_case , arcname=os.path.basename(__snake_case ) )
f.write(__snake_case , arcname=os.path.basename(__snake_case ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : str , __snake_case : Optional[int] , __snake_case : Tuple ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = tmp_path_factory.mktemp('data' ) / 'dataset.csv.zip'
with zipfile.ZipFile(__snake_case , 'w' ) as f:
f.write(__snake_case , arcname=os.path.basename(csv_path.replace('.csv' , '.CSV' ) ) )
f.write(__snake_case , arcname=os.path.basename(csva_path.replace('.csv' , '.CSV' ) ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Tuple , __snake_case : int , __snake_case : str ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.csv.zip'
with zipfile.ZipFile(__snake_case , 'w' ) as f:
f.write(__snake_case , arcname=os.path.join('main_dir' , os.path.basename(__snake_case ) ) )
f.write(__snake_case , arcname=os.path.join('main_dir' , os.path.basename(__snake_case ) ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase_ : int = str(tmp_path_factory.mktemp('data' ) / 'dataset.parquet' )
UpperCAmelCase_ : Dict = pa.schema(
{
'col_1': pa.string(),
'col_2': pa.intaa(),
'col_3': pa.floataa(),
} )
with open(__snake_case , 'wb' ) as f:
UpperCAmelCase_ : List[Any] = pq.ParquetWriter(__snake_case , schema=__snake_case )
UpperCAmelCase_ : Any = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(__snake_case ) )] for k in DATA[0]} , schema=__snake_case )
writer.write_table(__snake_case )
writer.close()
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = str(tmp_path_factory.mktemp('data' ) / 'dataset.json' )
UpperCAmelCase_ : Optional[int] = {'data': DATA}
with open(__snake_case , 'w' ) as f:
json.dump(__snake_case , __snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : List[Any] ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = str(tmp_path_factory.mktemp('data' ) / 'dataset.json' )
UpperCAmelCase_ : Tuple = {'data': DATA_DICT_OF_LISTS}
with open(__snake_case , 'w' ) as f:
json.dump(__snake_case , __snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase_ : Dict = str(tmp_path_factory.mktemp('data' ) / 'dataset.jsonl' )
with open(__snake_case , 'w' ) as f:
for item in DATA:
f.write(json.dumps(__snake_case ) + '\n' )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : str ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = str(tmp_path_factory.mktemp('data' ) / 'dataset2.jsonl' )
with open(__snake_case , 'w' ) as f:
for item in DATA:
f.write(json.dumps(__snake_case ) + '\n' )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : int ):
'''simple docstring'''
UpperCAmelCase_ : int = str(tmp_path_factory.mktemp('data' ) / 'dataset_312.jsonl' )
with open(__snake_case , 'w' ) as f:
for item in DATA_312:
f.write(json.dumps(__snake_case ) + '\n' )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : str ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = str(tmp_path_factory.mktemp('data' ) / 'dataset-str.jsonl' )
with open(__snake_case , 'w' ) as f:
for item in DATA_STR:
f.write(json.dumps(__snake_case ) + '\n' )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Dict , __snake_case : Dict ):
'''simple docstring'''
import gzip
UpperCAmelCase_ : Union[str, Any] = str(tmp_path_factory.mktemp('data' ) / 'dataset.txt.gz' )
with open(__snake_case , 'rb' ) as orig_file:
with gzip.open(__snake_case , 'wb' ) as zipped_file:
zipped_file.writelines(__snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : int , __snake_case : Any ):
'''simple docstring'''
import gzip
UpperCAmelCase_ : Dict = str(tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.gz' )
with open(__snake_case , 'rb' ) as orig_file:
with gzip.open(__snake_case , 'wb' ) as zipped_file:
zipped_file.writelines(__snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Optional[Any] , __snake_case : Dict , __snake_case : Optional[int] ):
'''simple docstring'''
UpperCAmelCase_ : int = tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.zip'
with zipfile.ZipFile(__snake_case , 'w' ) as f:
f.write(__snake_case , arcname=os.path.basename(__snake_case ) )
f.write(__snake_case , arcname=os.path.basename(__snake_case ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Optional[Any] , __snake_case : str , __snake_case : Dict , __snake_case : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase_ : str = tmp_path_factory.mktemp('data' ) / 'dataset_nested.jsonl.zip'
with zipfile.ZipFile(__snake_case , 'w' ) as f:
f.write(__snake_case , arcname=os.path.join('nested' , os.path.basename(__snake_case ) ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Dict , __snake_case : Union[str, Any] , __snake_case : Tuple ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.jsonl.zip'
with zipfile.ZipFile(__snake_case , 'w' ) as f:
f.write(__snake_case , arcname=os.path.join('main_dir' , os.path.basename(__snake_case ) ) )
f.write(__snake_case , arcname=os.path.join('main_dir' , os.path.basename(__snake_case ) ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Tuple , __snake_case : str , __snake_case : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.tar'
with tarfile.TarFile(__snake_case , 'w' ) as f:
f.add(__snake_case , arcname=os.path.basename(__snake_case ) )
f.add(__snake_case , arcname=os.path.basename(__snake_case ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : str , __snake_case : Any , __snake_case : Any , __snake_case : List[Any] ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = tmp_path_factory.mktemp('data' ) / 'dataset_nested.jsonl.tar'
with tarfile.TarFile(__snake_case , 'w' ) as f:
f.add(__snake_case , arcname=os.path.join('nested' , os.path.basename(__snake_case ) ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : List[Any] ):
'''simple docstring'''
UpperCAmelCase_ : Any = ['0', '1', '2', '3']
UpperCAmelCase_ : Dict = str(tmp_path_factory.mktemp('data' ) / 'dataset.txt' )
with open(__snake_case , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : List[Any] ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = ['0', '1', '2', '3']
UpperCAmelCase_ : Optional[int] = str(tmp_path_factory.mktemp('data' ) / 'dataset2.txt' )
with open(__snake_case , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Tuple ):
'''simple docstring'''
UpperCAmelCase_ : Dict = ['0', '1', '2', '3']
UpperCAmelCase_ : List[str] = tmp_path_factory.mktemp('data' ) / 'dataset.abc'
with open(__snake_case , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Any , __snake_case : Union[str, Any] , __snake_case : List[Any] ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = tmp_path_factory.mktemp('data' ) / 'dataset.text.zip'
with zipfile.ZipFile(__snake_case , 'w' ) as f:
f.write(__snake_case , arcname=os.path.basename(__snake_case ) )
f.write(__snake_case , arcname=os.path.basename(__snake_case ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Dict , __snake_case : str , __snake_case : Any ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.text.zip'
with zipfile.ZipFile(__snake_case , 'w' ) as f:
f.write(__snake_case , arcname=os.path.join('main_dir' , os.path.basename(__snake_case ) ) )
f.write(__snake_case , arcname=os.path.join('main_dir' , os.path.basename(__snake_case ) ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Union[str, Any] , __snake_case : str , __snake_case : Optional[int] ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = tmp_path_factory.mktemp('data' ) / 'dataset.ext.zip'
with zipfile.ZipFile(__snake_case , 'w' ) as f:
f.write(__snake_case , arcname=os.path.basename('unsupported.ext' ) )
f.write(__snake_case , arcname=os.path.basename('unsupported_2.ext' ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Dict ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = '\n'.join(['First', 'Second\u2029with Unicode new line', 'Third'] )
UpperCAmelCase_ : Dict = str(tmp_path_factory.mktemp('data' ) / 'dataset_with_unicode_new_lines.txt' )
with open(__snake_case , 'w' , encoding='utf-8' ) as f:
f.write(__snake_case )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( ):
'''simple docstring'''
return os.path.join('tests' , 'features' , 'data' , 'test_image_rgb.jpg' )
@pytest.fixture(scope='session' )
def lowercase__ ( ):
'''simple docstring'''
return os.path.join('tests' , 'features' , 'data' , 'test_audio_44100.wav' )
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : str , __snake_case : List[str] ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = tmp_path_factory.mktemp('data' ) / 'dataset.img.zip'
with zipfile.ZipFile(__snake_case , 'w' ) as f:
f.write(__snake_case , arcname=os.path.basename(__snake_case ) )
f.write(__snake_case , arcname=os.path.basename(__snake_case ).replace('.jpg' , '2.jpg' ) )
return path
@pytest.fixture(scope='session' )
def lowercase__ ( __snake_case : Any ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = tmp_path_factory.mktemp('data_dir' )
(data_dir / "subdir").mkdir()
with open(data_dir / 'subdir' / 'train.txt' , 'w' ) as f:
f.write('foo\n' * 10 )
with open(data_dir / 'subdir' / 'test.txt' , 'w' ) as f:
f.write('bar\n' * 10 )
# hidden file
with open(data_dir / 'subdir' / '.test.txt' , 'w' ) as f:
f.write('bar\n' * 10 )
# hidden directory
(data_dir / ".subdir").mkdir()
with open(data_dir / '.subdir' / 'train.txt' , 'w' ) as f:
f.write('foo\n' * 10 )
with open(data_dir / '.subdir' / 'test.txt' , 'w' ) as f:
f.write('bar\n' * 10 )
return data_dir
| 29 | 1 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
__UpperCAmelCase = logging.get_logger(__name__)
if is_vision_available():
import PIL
class lowerCamelCase (_snake_case ):
'''simple docstring'''
_snake_case : Dict = ['''pixel_values''']
def __init__( self , _UpperCamelCase = True , _UpperCamelCase = None , _UpperCamelCase = PILImageResampling.BICUBIC , _UpperCamelCase = True , _UpperCamelCase = None , _UpperCamelCase = True , _UpperCamelCase = 1 / 2_5_5 , _UpperCamelCase = True , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = True , **_UpperCamelCase , ) -> None:
super().__init__(**_UpperCamelCase )
UpperCAmelCase_ : str = size if size is not None else {'shortest_edge': 2_2_4}
UpperCAmelCase_ : Any = get_size_dict(_UpperCamelCase , default_to_square=_UpperCamelCase )
UpperCAmelCase_ : Dict = crop_size if crop_size is not None else {'height': 2_2_4, 'width': 2_2_4}
UpperCAmelCase_ : Union[str, Any] = get_size_dict(_UpperCamelCase , default_to_square=_UpperCamelCase , param_name='crop_size' )
UpperCAmelCase_ : Any = do_resize
UpperCAmelCase_ : List[str] = size
UpperCAmelCase_ : Optional[Any] = resample
UpperCAmelCase_ : Dict = do_center_crop
UpperCAmelCase_ : Union[str, Any] = crop_size
UpperCAmelCase_ : Tuple = do_rescale
UpperCAmelCase_ : Union[str, Any] = rescale_factor
UpperCAmelCase_ : Optional[int] = do_normalize
UpperCAmelCase_ : List[str] = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
UpperCAmelCase_ : Dict = image_std if image_std is not None else OPENAI_CLIP_STD
UpperCAmelCase_ : Tuple = do_convert_rgb
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = PILImageResampling.BICUBIC , _UpperCamelCase = None , **_UpperCamelCase , ) -> np.ndarray:
UpperCAmelCase_ : Union[str, Any] = get_size_dict(_UpperCamelCase , default_to_square=_UpperCamelCase )
if "shortest_edge" not in size:
raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}" )
UpperCAmelCase_ : Union[str, Any] = get_resize_output_image_size(_UpperCamelCase , size=size['shortest_edge'] , default_to_square=_UpperCamelCase )
return resize(_UpperCamelCase , size=_UpperCamelCase , resample=_UpperCamelCase , data_format=_UpperCamelCase , **_UpperCamelCase )
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = None , **_UpperCamelCase , ) -> np.ndarray:
UpperCAmelCase_ : str = get_size_dict(_UpperCamelCase )
if "height" not in size or "width" not in size:
raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}" )
return center_crop(_UpperCamelCase , size=(size['height'], size['width']) , data_format=_UpperCamelCase , **_UpperCamelCase )
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = None , **_UpperCamelCase , ) -> Union[str, Any]:
return rescale(_UpperCamelCase , scale=_UpperCamelCase , data_format=_UpperCamelCase , **_UpperCamelCase )
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = None , **_UpperCamelCase , ) -> np.ndarray:
return normalize(_UpperCamelCase , mean=_UpperCamelCase , std=_UpperCamelCase , data_format=_UpperCamelCase , **_UpperCamelCase )
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = ChannelDimension.FIRST , **_UpperCamelCase , ) -> PIL.Image.Image:
UpperCAmelCase_ : Any = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase_ : Optional[int] = size if size is not None else self.size
UpperCAmelCase_ : Tuple = get_size_dict(_UpperCamelCase , param_name='size' , default_to_square=_UpperCamelCase )
UpperCAmelCase_ : Optional[Any] = resample if resample is not None else self.resample
UpperCAmelCase_ : Any = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCAmelCase_ : str = crop_size if crop_size is not None else self.crop_size
UpperCAmelCase_ : Any = get_size_dict(_UpperCamelCase , param_name='crop_size' , default_to_square=_UpperCamelCase )
UpperCAmelCase_ : List[Any] = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase_ : Union[str, Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase_ : Optional[Any] = do_normalize if do_normalize is not None else self.do_normalize
UpperCAmelCase_ : Union[str, Any] = image_mean if image_mean is not None else self.image_mean
UpperCAmelCase_ : Union[str, Any] = image_std if image_std is not None else self.image_std
UpperCAmelCase_ : int = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
UpperCAmelCase_ : List[Any] = make_list_of_images(_UpperCamelCase )
if not valid_images(_UpperCamelCase ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
UpperCAmelCase_ : Dict = [convert_to_rgb(_UpperCamelCase ) for image in images]
# All transformations expect numpy arrays.
UpperCAmelCase_ : List[str] = [to_numpy_array(_UpperCamelCase ) for image in images]
if do_resize:
UpperCAmelCase_ : Union[str, Any] = [self.resize(image=_UpperCamelCase , size=_UpperCamelCase , resample=_UpperCamelCase ) for image in images]
if do_center_crop:
UpperCAmelCase_ : int = [self.center_crop(image=_UpperCamelCase , size=_UpperCamelCase ) for image in images]
if do_rescale:
UpperCAmelCase_ : List[Any] = [self.rescale(image=_UpperCamelCase , scale=_UpperCamelCase ) for image in images]
if do_normalize:
UpperCAmelCase_ : Optional[Any] = [self.normalize(image=_UpperCamelCase , mean=_UpperCamelCase , std=_UpperCamelCase ) for image in images]
UpperCAmelCase_ : Optional[Any] = [to_channel_dimension_format(_UpperCamelCase , _UpperCamelCase ) for image in images]
UpperCAmelCase_ : Tuple = {'pixel_values': images}
return BatchFeature(data=_UpperCamelCase , tensor_type=_UpperCamelCase )
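# Hedged usage sketch (the processor class name is obfuscated above; `proc` stands in for an instance):
#   batch = proc(images=pil_image, return_tensors="pt")
#   batch["pixel_values"].shape  # torch.Size([1, 3, crop_height, crop_width])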
| 29 |
from __future__ import annotations
def lowercase__ ( __snake_case : tuple[int, int] , __snake_case : int ):
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = position
UpperCAmelCase_ : str = [
(y + 1, x + 2),
(y - 1, x + 2),
(y + 1, x - 2),
(y - 1, x - 2),
(y + 2, x + 1),
(y + 2, x - 1),
(y - 2, x + 1),
(y - 2, x - 1),
]
UpperCAmelCase_ : Optional[Any] = []
for position in positions:
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = position
if 0 <= y_test < n and 0 <= x_test < n:
permissible_positions.append(__snake_case )
return permissible_positions
def lowercase__ ( __snake_case : list[list[int]] ):
'''simple docstring'''
return not any(elem == 0 for row in board for elem in row )
def lowercase__ ( __snake_case : list[list[int]] , __snake_case : tuple[int, int] , __snake_case : int ):
'''simple docstring'''
if is_complete(__snake_case ):
return True
for position in get_valid_pos(__snake_case , len(__snake_case ) ):
UpperCAmelCase_ , UpperCAmelCase_ : Any = position
if board[y][x] == 0:
UpperCAmelCase_ : Optional[Any] = curr + 1
if open_knight_tour_helper(__snake_case , __snake_case , curr + 1 ):
return True
UpperCAmelCase_ : List[Any] = 0
return False
def lowercase__ ( __snake_case : int ):
'''simple docstring'''
UpperCAmelCase_ : str = [[0 for i in range(__snake_case )] for j in range(__snake_case )]
for i in range(__snake_case ):
for j in range(__snake_case ):
UpperCAmelCase_ : Optional[Any] = 1
if open_knight_tour_helper(__snake_case , (i, j) , 1 ):
return board
UpperCAmelCase_ : List[Any] = 0
UpperCAmelCase_ : List[str] = F"Open Kight Tour cannot be performed on a board of size {n}"
raise ValueError(__snake_case )
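# Illustrative, de-obfuscated sketch of the move generation above (the name
# is hypothetical; the obfuscated helper implements the same logic):
def _example_knight_moves(y: int, x: int, n: int) -> list[tuple[int, int]]:
    # The eight (+/-1, +/-2) knight offsets, filtered to on-board squares.
    deltas = [(1, 2), (-1, 2), (1, -2), (-1, -2), (2, 1), (2, -1), (-2, 1), (-2, -1)]
    return [(y + dy, x + dx) for dy, dx in deltas if 0 <= y + dy < n and 0 <= x + dx < n]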
if __name__ == "__main__":
import doctest
doctest.testmod()
| 29 | 1 |
import warnings
from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor
__UpperCAmelCase = logging.get_logger(__name__)
class lowerCamelCase (_snake_case ):
'''simple docstring'''
def __init__( self , *_UpperCamelCase , **_UpperCamelCase ) -> None:
warnings.warn(
'The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use ImageGPTImageProcessor instead.' , _UpperCamelCase , )
super().__init__(*_UpperCamelCase , **_UpperCamelCase )
| 29 |
def lowercase__ ( __snake_case : int ):
'''simple docstring'''
UpperCAmelCase_ : list[list[int]] = [[0 for _ in range(__snake_case )] for _ in range(m + 1 )]
for i in range(m + 1 ):
UpperCAmelCase_ : Optional[Any] = 1
for n in range(m + 1 ):
for k in range(1 , __snake_case ):
memo[n][k] += memo[n][k - 1]
if n - k > 0:
memo[n][k] += memo[n - k - 1][k]
return memo[m][m - 1]
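# Illustrative sketch (not part of the original module): a standard O(m^2)
# partition-counting DP with readable, hypothetical names; the memo table
# above computes the same quantity, the integer-partition count p(m).
def _example_count_partitions(m: int) -> int:
    # ways[n] = number of partitions of n using the parts added so far
    ways = [1] + [0] * m
    for part in range(1, m + 1):
        for n in range(part, m + 1):
            ways[n] += ways[n - part]
    return ways[m]
# _example_count_partitions(5) == 7: 5, 4+1, 3+2, 3+1+1, 2+2+1, 2+1+1+1, 1+1+1+1+1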
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
try:
__UpperCAmelCase = int(input('Enter a number: ').strip())
print(partition(n))
except ValueError:
print('Please enter a number.')
else:
try:
__UpperCAmelCase = int(sys.argv[1])
print(partition(n))
except ValueError:
print('Please pass a number.')
| 29 | 1 |
from io import BytesIO
from typing import List, Union
import requests
from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_decord_available():
import numpy as np
from decord import VideoReader
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
__UpperCAmelCase = logging.get_logger(__name__)
@add_end_docstrings(_snake_case )
class lowerCamelCase (_snake_case ):
'''simple docstring'''
def __init__( self , *_UpperCamelCase , **_UpperCamelCase ) -> List[Any]:
super().__init__(*_UpperCamelCase , **_UpperCamelCase )
requires_backends(self , 'decord' )
self.check_model_type(_UpperCamelCase )
def __UpperCAmelCase ( self , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase=None ) -> Dict:
UpperCAmelCase_ : Optional[Any] = {}
if frame_sampling_rate is not None:
UpperCAmelCase_ : Optional[Any] = frame_sampling_rate
if num_frames is not None:
UpperCAmelCase_ : Optional[Any] = num_frames
UpperCAmelCase_ : Optional[Any] = {}
if top_k is not None:
UpperCAmelCase_ : List[Any] = top_k
return preprocess_params, {}, postprocess_params
def __call__( self , _UpperCamelCase , **_UpperCamelCase ) -> Union[str, Any]:
return super().__call__(_UpperCamelCase , **_UpperCamelCase )
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase=None , _UpperCamelCase=1 ) -> Optional[int]:
if num_frames is None:
UpperCAmelCase_ : Optional[Any] = self.model.config.num_frames
if video.startswith('http://' ) or video.startswith('https://' ):
UpperCAmelCase_ : List[str] = BytesIO(requests.get(_UpperCamelCase ).content )
UpperCAmelCase_ : Optional[Any] = VideoReader(_UpperCamelCase )
videoreader.seek(0 )
UpperCAmelCase_ : List[Any] = 0
UpperCAmelCase_ : Union[str, Any] = num_frames * frame_sampling_rate - 1
UpperCAmelCase_ : Optional[int] = np.linspace(_UpperCamelCase , _UpperCamelCase , num=_UpperCamelCase , dtype=np.intaa )
UpperCAmelCase_ : List[Any] = videoreader.get_batch(_UpperCamelCase ).asnumpy()
UpperCAmelCase_ : Union[str, Any] = list(_UpperCamelCase )
UpperCAmelCase_ : List[Any] = self.image_processor(_UpperCamelCase , return_tensors=self.framework )
return model_inputs
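    # Note on the sampling above: frame indices are spread evenly
    # (np.linspace) across num_frames * frame_sampling_rate consecutive
    # frames from the seek position, then decoded in one batched
    # `get_batch` call before the image processor tensorizes them.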
def __UpperCAmelCase ( self , _UpperCamelCase ) -> List[Any]:
UpperCAmelCase_ : Dict = self.model(**_UpperCamelCase )
return model_outputs
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase=5 ) -> List[str]:
if top_k > self.model.config.num_labels:
UpperCAmelCase_ : List[Any] = self.model.config.num_labels
if self.framework == "pt":
UpperCAmelCase_ : Dict = model_outputs.logits.softmax(-1 )[0]
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = probs.topk(_UpperCamelCase )
else:
raise ValueError(f"Unsupported framework: {self.framework}" )
UpperCAmelCase_ : str = scores.tolist()
UpperCAmelCase_ : Any = ids.tolist()
return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(_UpperCamelCase , _UpperCamelCase )]
| 29 |
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
__UpperCAmelCase = logging.get_logger(__name__)
@add_end_docstrings(_snake_case )
class lowerCamelCase (_snake_case ):
'''simple docstring'''
def __init__( self , *_UpperCamelCase , **_UpperCamelCase ) -> int:
super().__init__(*_UpperCamelCase , **_UpperCamelCase )
self.check_model_type(_UpperCamelCase )
def __UpperCAmelCase ( self , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase=None , **_UpperCamelCase ) -> List[Any]:
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = {}, {}
if padding is not None:
UpperCAmelCase_ : List[str] = padding
if truncation is not None:
UpperCAmelCase_ : Tuple = truncation
if top_k is not None:
UpperCAmelCase_ : Dict = top_k
return preprocess_params, {}, postprocess_params
def __call__( self , _UpperCamelCase , _UpperCamelCase = None , **_UpperCamelCase ) -> int:
if isinstance(_UpperCamelCase , (Image.Image, str) ) and isinstance(_UpperCamelCase , _UpperCamelCase ):
UpperCAmelCase_ : Optional[Any] = {'image': image, 'question': question}
else:
UpperCAmelCase_ : List[str] = image
UpperCAmelCase_ : Optional[Any] = super().__call__(_UpperCamelCase , **_UpperCamelCase )
return results
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase=False , _UpperCamelCase=False ) -> Optional[Any]:
UpperCAmelCase_ : List[Any] = load_image(inputs['image'] )
UpperCAmelCase_ : Dict = self.tokenizer(
inputs['question'] , return_tensors=self.framework , padding=_UpperCamelCase , truncation=_UpperCamelCase )
UpperCAmelCase_ : int = self.image_processor(images=_UpperCamelCase , return_tensors=self.framework )
model_inputs.update(_UpperCamelCase )
return model_inputs
def __UpperCAmelCase ( self , _UpperCamelCase ) -> Optional[int]:
UpperCAmelCase_ : Any = self.model(**_UpperCamelCase )
return model_outputs
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase=5 ) -> str:
if top_k > self.model.config.num_labels:
UpperCAmelCase_ : Union[str, Any] = self.model.config.num_labels
if self.framework == "pt":
UpperCAmelCase_ : List[str] = model_outputs.logits.sigmoid()[0]
UpperCAmelCase_ , UpperCAmelCase_ : str = probs.topk(_UpperCamelCase )
else:
raise ValueError(f"Unsupported framework: {self.framework}" )
UpperCAmelCase_ : Optional[Any] = scores.tolist()
UpperCAmelCase_ : Tuple = ids.tolist()
return [{"score": score, "answer": self.model.config.idalabel[_id]} for score, _id in zip(_UpperCamelCase , _UpperCamelCase )]
| 29 | 1 |
import os
from argparse import ArgumentParser
from typing import List
import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
__UpperCAmelCase = 4
__UpperCAmelCase = 3
class lowerCamelCase (_snake_case ):
'''simple docstring'''
pass
def lowercase__ ( __snake_case : List[str] ):
'''simple docstring'''
for shard in shards:
for i in range(__snake_case ):
yield {"i": i, "shard": shard}
def lowercase__ ( ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = int(os.environ['RANK'] )
UpperCAmelCase_ : int = int(os.environ['WORLD_SIZE'] )
UpperCAmelCase_ : Dict = ArgumentParser()
parser.add_argument('--streaming' , type=__snake_case )
parser.add_argument('--local_rank' , type=__snake_case )
parser.add_argument('--num_workers' , type=__snake_case , default=0 )
UpperCAmelCase_ : Optional[Any] = parser.parse_args()
UpperCAmelCase_ : Optional[int] = args.streaming
UpperCAmelCase_ : Optional[Any] = args.num_workers
UpperCAmelCase_ : Any = {'shards': [F"shard_{shard_idx}" for shard_idx in range(__snake_case )]}
UpperCAmelCase_ : List[Any] = IterableDataset.from_generator(__snake_case , gen_kwargs=__snake_case )
if not streaming:
UpperCAmelCase_ : Optional[int] = Dataset.from_list(list(__snake_case ) )
UpperCAmelCase_ : List[Any] = split_dataset_by_node(__snake_case , rank=__snake_case , world_size=__snake_case )
UpperCAmelCase_ : str = torch.utils.data.DataLoader(__snake_case , num_workers=__snake_case )
UpperCAmelCase_ : List[str] = NUM_SHARDS * NUM_ITEMS_PER_SHARD
UpperCAmelCase_ : str = full_size // world_size
expected_local_size += int(rank < (full_size % world_size) )
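    # Worked example: 4 shards x 3 items = 12 rows over 5 ranks gives local
    # sizes [3, 3, 2, 2, 2] (12 // 5 == 2, plus 1 for each rank < 12 % 5).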
UpperCAmelCase_ : Optional[Any] = sum(1 for _ in dataloader )
if local_size != expected_local_size:
raise FailedTestError(F"local_size {local_size} != expected_local_size {expected_local_size}" )
if __name__ == "__main__":
main()
| 29 |
import os
# Precompute a list of the first 100 triangular numbers
__UpperCAmelCase = [int(0.5 * n * (n + 1)) for n in range(1, 101)]
def lowercase__ ( ):
'''simple docstring'''
UpperCAmelCase_ : Any = os.path.dirname(os.path.realpath(__snake_case ) )
UpperCAmelCase_ : Optional[Any] = os.path.join(__snake_case , 'words.txt' )
UpperCAmelCase_ : Union[str, Any] = ''
with open(__snake_case ) as f:
UpperCAmelCase_ : List[Any] = f.readline()
UpperCAmelCase_ : Optional[int] = [word.strip('"' ) for word in words.strip('\r\n' ).split(',' )]
UpperCAmelCase_ : Optional[int] = [
word
for word in [sum(ord(__snake_case ) - 64 for x in word ) for word in words]
if word in TRIANGULAR_NUMBERS
]
return len(__snake_case )
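# Worked example: "SKY" scores 19 + 11 + 25 = 55, the 10th triangular
# number, so it passes the filter above and is counted.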
if __name__ == "__main__":
print(solution())
| 29 | 1 |
from math import factorial, radians
def lowercase__ ( __snake_case : float , __snake_case : int = 18 , __snake_case : int = 10 ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)
# Converting from degrees to radians
UpperCAmelCase_ : Optional[Any] = radians(__snake_case )
UpperCAmelCase_ : Optional[int] = angle_in_radians
UpperCAmelCase_ : Any = 3
UpperCAmelCase_ : List[str] = -1
for _ in range(__snake_case ):
result += (b * (angle_in_radians**a)) / factorial(__snake_case )
UpperCAmelCase_ : Dict = -b # One positive term and the next will be negative and so on...
a += 2 # Increased by 2 for every term.
return round(__snake_case , __snake_case )
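# Worked example: with the defaults (18 terms, 10 decimal places) the
# alternating series converges tightly, e.g. an input of 90 degrees
# yields 1.0 and 30 degrees yields 0.5.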
if __name__ == "__main__":
__import__('doctest').testmod()
| 29 |
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
__UpperCAmelCase = importlib.util.find_spec('s3fs') is not None
if _has_safs:
from .safilesystem import SaFileSystem # noqa: F401
__UpperCAmelCase = [
compression.BzaFileSystem,
compression.GzipFileSystem,
compression.LzaFileSystem,
compression.XzFileSystem,
compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
warnings.warn(F'A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.')
fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def lowercase__ ( __snake_case : str ):
'''simple docstring'''
if "://" in dataset_path:
UpperCAmelCase_ : int = dataset_path.split('://' )[1]
return dataset_path
def lowercase__ ( __snake_case : fsspec.AbstractFileSystem ):
'''simple docstring'''
if fs is not None and fs.protocol != "file":
return True
else:
return False
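# Illustrative behavior of the two helpers above: "s3://bucket/dataset" is
# stripped to "bucket/dataset", a plain local path is returned unchanged,
# and only filesystems whose protocol is not "file" count as remote.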
def lowercase__ ( __snake_case : fsspec.AbstractFileSystem , __snake_case : str , __snake_case : str ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = not is_remote_filesystem(__snake_case )
if is_local:
# LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
shutil.move(fs._strip_protocol(__snake_case ) , fs._strip_protocol(__snake_case ) )
else:
fs.mv(__snake_case , __snake_case , recursive=__snake_case )
def lowercase__ ( ):
'''simple docstring'''
if hasattr(fsspec.asyn , 'reset_lock' ):
# for future fsspec>2022.05.0
fsspec.asyn.reset_lock()
else:
UpperCAmelCase_ : Optional[Any] = None
UpperCAmelCase_ : Union[str, Any] = None
UpperCAmelCase_ : int = threading.Lock()
| 29 | 1 |
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowerCamelCase (_snake_case , unittest.TestCase ):
'''simple docstring'''
_snake_case : int = CLIPTokenizer
_snake_case : Tuple = CLIPTokenizerFast
_snake_case : List[Any] = True
_snake_case : Dict = {}
_snake_case : Optional[int] = False
def __UpperCAmelCase ( self ) -> Tuple:
super().setUp()
# fmt: off
UpperCAmelCase_ : str = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
# fmt: on
UpperCAmelCase_ : Optional[int] = dict(zip(_UpperCamelCase , range(len(_UpperCamelCase ) ) ) )
UpperCAmelCase_ : Tuple = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>']
UpperCAmelCase_ : Any = {'unk_token': '<unk>'}
UpperCAmelCase_ : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
UpperCAmelCase_ : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(_UpperCamelCase ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(_UpperCamelCase ) )
def __UpperCAmelCase ( self , **_UpperCamelCase ) -> Optional[Any]:
kwargs.update(self.special_tokens_map )
return CLIPTokenizer.from_pretrained(self.tmpdirname , **_UpperCamelCase )
def __UpperCAmelCase ( self , **_UpperCamelCase ) -> Any:
kwargs.update(self.special_tokens_map )
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **_UpperCamelCase )
def __UpperCAmelCase ( self , _UpperCamelCase ) -> Any:
UpperCAmelCase_ : List[str] = 'lower newer'
UpperCAmelCase_ : Any = 'lower newer'
return input_text, output_text
def __UpperCAmelCase ( self ) -> List[str]:
UpperCAmelCase_ : List[Any] = CLIPTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
UpperCAmelCase_ : Tuple = 'lower newer'
UpperCAmelCase_ : Any = ['lo', 'w', 'er</w>', 'n', 'e', 'w', 'er</w>']
UpperCAmelCase_ : Optional[int] = tokenizer.tokenize(_UpperCamelCase )
self.assertListEqual(_UpperCamelCase , _UpperCamelCase )
UpperCAmelCase_ : List[Any] = tokens + [tokenizer.unk_token]
UpperCAmelCase_ : List[Any] = [1_0, 2, 1_6, 9, 3, 2, 1_6, 2_0]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCamelCase ) , _UpperCamelCase )
@require_ftfy
def __UpperCAmelCase ( self ) -> Optional[Any]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
UpperCAmelCase_ : List[Any] = self.tokenizer_class.from_pretrained(_UpperCamelCase , **_UpperCamelCase )
UpperCAmelCase_ : int = self.rust_tokenizer_class.from_pretrained(_UpperCamelCase , **_UpperCamelCase )
UpperCAmelCase_ : Optional[int] = 'A\n\'ll 11p223RF☆ho!!to?\'d\'d\'\'d of a cat to-$\'\'d.'
UpperCAmelCase_ : str = tokenizer_s.tokenize(_UpperCamelCase )
UpperCAmelCase_ : Optional[Any] = tokenizer_r.tokenize(_UpperCamelCase )
self.assertListEqual(_UpperCamelCase , _UpperCamelCase )
# Test that the tokenization is identical on an example containing a character (Latin Small Letter A
# with Tilde) encoded in 2 different ways
UpperCAmelCase_ : Any = 'xa\u0303y' + ' ' + 'x\xe3y'
UpperCAmelCase_ : Optional[int] = tokenizer_s.tokenize(_UpperCamelCase )
UpperCAmelCase_ : Any = tokenizer_r.tokenize(_UpperCamelCase )
self.assertListEqual(_UpperCamelCase , _UpperCamelCase )
# Test that the tokenization is identical on unicode of space type
UpperCAmelCase_ : int = [
'\u0009', # (horizontal tab, '\t')
'\u000B', # (vertical tab)
'\u000C', # (form feed)
'\u0020', # (space, ' ')
            '\u200E', # (left-to-right mark)
'\u200F', # (right-to-left mark)
]
for unicode_seq in spaces_unicodes:
UpperCAmelCase_ : Optional[Any] = tokenizer_s.tokenize(_UpperCamelCase )
UpperCAmelCase_ : Union[str, Any] = tokenizer_r.tokenize(_UpperCamelCase )
self.assertListEqual(_UpperCamelCase , _UpperCamelCase )
# Test that the tokenization is identical on unicode of line break type
UpperCAmelCase_ : str = [
'\u000A', # (line feed, '\n')
'\r\n', # (carriage return and line feed, '\r\n')
'\u000D', # (carriage return, '\r')
'\r', # (carriage return, '\r')
'\u000D', # (carriage return, '\r')
'\u2028', # (line separator)
'\u2029', # (paragraph separator)
# "\u0085", # (next line)
]
# The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
# it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
# space (and thus into an empty list).
for unicode_seq in line_break_unicodes:
UpperCAmelCase_ : Any = tokenizer_s.tokenize(_UpperCamelCase )
UpperCAmelCase_ : int = tokenizer_r.tokenize(_UpperCamelCase )
self.assertListEqual(_UpperCamelCase , _UpperCamelCase )
def __UpperCAmelCase ( self ) -> Dict:
        # Verify that the offsets adapt correctly to the `add_prefix_space` argument
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
UpperCAmelCase_ : str = 'hello' # `hello` is a token in the vocabulary of `pretrained_name`
UpperCAmelCase_ : Optional[int] = f"{text_of_1_token} {text_of_1_token}"
UpperCAmelCase_ : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(
_UpperCamelCase , use_fast=_UpperCamelCase , )
UpperCAmelCase_ : int = tokenizer_r(_UpperCamelCase , return_offsets_mapping=_UpperCamelCase , add_special_tokens=_UpperCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(_UpperCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(_UpperCamelCase ) + 1, len(_UpperCamelCase ) + 1 + len(_UpperCamelCase )) , )
UpperCAmelCase_ : Optional[int] = f" {text}"
UpperCAmelCase_ : Tuple = self.rust_tokenizer_class.from_pretrained(
_UpperCamelCase , use_fast=_UpperCamelCase , )
UpperCAmelCase_ : int = tokenizer_r(_UpperCamelCase , return_offsets_mapping=_UpperCamelCase , add_special_tokens=_UpperCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(_UpperCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(_UpperCamelCase ) + 1, 1 + len(_UpperCamelCase ) + 1 + len(_UpperCamelCase )) , )
def __UpperCAmelCase ( self ) -> Any:
# Test related to the breaking change introduced in transformers v4.17.0
# We need to check that an error in raised when the user try to load a previous version of the tokenizer.
with self.assertRaises(_UpperCamelCase ) as context:
self.rust_tokenizer_class.from_pretrained('robot-test/old-clip-tokenizer' )
self.assertTrue(
context.exception.args[0].startswith(
'The `backend_tokenizer` provided does not match the expected format.' ) )
@require_ftfy
def __UpperCAmelCase ( self ) -> Optional[Any]:
super().test_tokenization_python_rust_equals()
def __UpperCAmelCase ( self ) -> List[Any]:
# CLIP always lower cases letters
pass
| 29 |
def lowercase__ ( __snake_case : list ):
'''simple docstring'''
for i in range(len(__snake_case ) - 1 , 0 , -1 ):
UpperCAmelCase_ : Dict = False
for j in range(__snake_case , 0 , -1 ):
if unsorted[j] < unsorted[j - 1]:
UpperCAmelCase_ , UpperCAmelCase_ : Any = unsorted[j - 1], unsorted[j]
UpperCAmelCase_ : int = True
for j in range(__snake_case ):
if unsorted[j] > unsorted[j + 1]:
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = unsorted[j + 1], unsorted[j]
UpperCAmelCase_ : Any = True
if not swapped:
break
return unsorted
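# Worked example: cocktail_shaker_sort([4, 5, 2, 1, 2]) -> [1, 2, 2, 4, 5];
# each outer pass bubbles once backward and once forward, and the early
# `break` fires as soon as a full pass makes no swap.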
if __name__ == "__main__":
import doctest
doctest.testmod()
__UpperCAmelCase = input('Enter numbers separated by a comma:\n').strip()
__UpperCAmelCase = [int(item) for item in user_input.split(',')]
print(F'{cocktail_shaker_sort(unsorted) = }')
| 29 | 1 |
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
__UpperCAmelCase = logging.get_logger(__name__)
class lowerCamelCase (_snake_case ):
'''simple docstring'''
def __init__( self , *_UpperCamelCase , **_UpperCamelCase ) -> None:
warnings.warn(
'The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
' use CLIPImageProcessor instead.' , _UpperCamelCase , )
super().__init__(*_UpperCamelCase , **_UpperCamelCase )
| 29 |
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
__UpperCAmelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
__UpperCAmelCase = '\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline\n >>> from diffusers.utils import load_image\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior.to("cuda")\n\n >>> prompt = "A red cartoon frog, 4k"\n >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)\n\n >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16\n ... )\n >>> pipe.to("cuda")\n\n >>> init_image = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/frog.png"\n ... )\n\n >>> image = pipe(\n ... image=init_image,\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... strength=0.2,\n ... ).images\n\n >>> image[0].save("red_frog.png")\n ```\n'
def lowercase__ ( __snake_case : List[str] , __snake_case : int , __snake_case : Tuple=8 ):
'''simple docstring'''
UpperCAmelCase_ : Dict = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
UpperCAmelCase_ : List[Any] = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
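# Worked example: with scale_factor=8, (768, 768) maps to 768 // 8**2 = 12
# latent cells per side and is returned as (96, 96) after re-multiplying.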
def lowercase__ ( __snake_case : Any , __snake_case : int=512 , __snake_case : Dict=512 ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = pil_image.resize((w, h) , resample=Image.BICUBIC , reducing_gap=1 )
UpperCAmelCase_ : Dict = np.array(pil_image.convert('RGB' ) )
UpperCAmelCase_ : Any = arr.astype(np.floataa ) / 127.5 - 1
UpperCAmelCase_ : Dict = np.transpose(__snake_case , [2, 0, 1] )
UpperCAmelCase_ : List[str] = torch.from_numpy(__snake_case ).unsqueeze(0 )
return image
class lowerCamelCase (_snake_case ):
'''simple docstring'''
def __init__( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , ) -> Union[str, Any]:
super().__init__()
self.register_modules(
unet=_UpperCamelCase , scheduler=_UpperCamelCase , movq=_UpperCamelCase , )
UpperCAmelCase_ : Tuple = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Dict:
# get the original timestep using init_timestep
UpperCAmelCase_ : Any = min(int(num_inference_steps * strength ) , _UpperCamelCase )
UpperCAmelCase_ : List[Any] = max(num_inference_steps - init_timestep , 0 )
UpperCAmelCase_ : str = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=None ) -> Tuple:
if not isinstance(_UpperCamelCase , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(_UpperCamelCase )}" )
UpperCAmelCase_ : List[str] = image.to(device=_UpperCamelCase , dtype=_UpperCamelCase )
UpperCAmelCase_ : List[str] = batch_size * num_images_per_prompt
if image.shape[1] == 4:
UpperCAmelCase_ : List[str] = image
else:
if isinstance(_UpperCamelCase , _UpperCamelCase ) and len(_UpperCamelCase ) != batch_size:
raise ValueError(
f"You have passed a list of generators of length {len(_UpperCamelCase )}, but requested an effective batch"
f" size of {batch_size}. Make sure the batch size matches the length of the generators." )
elif isinstance(_UpperCamelCase , _UpperCamelCase ):
UpperCAmelCase_ : Any = [
self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(_UpperCamelCase )
]
UpperCAmelCase_ : Tuple = torch.cat(_UpperCamelCase , dim=0 )
else:
UpperCAmelCase_ : Union[str, Any] = self.movq.encode(_UpperCamelCase ).latent_dist.sample(_UpperCamelCase )
UpperCAmelCase_ : int = self.movq.config.scaling_factor * init_latents
UpperCAmelCase_ : Optional[int] = torch.cat([init_latents] , dim=0 )
UpperCAmelCase_ : Tuple = init_latents.shape
UpperCAmelCase_ : List[Any] = randn_tensor(_UpperCamelCase , generator=_UpperCamelCase , device=_UpperCamelCase , dtype=_UpperCamelCase )
# get latents
UpperCAmelCase_ : str = self.scheduler.add_noise(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
UpperCAmelCase_ : Union[str, Any] = init_latents
return latents
def __UpperCAmelCase ( self , _UpperCamelCase=0 ) -> Any:
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`' )
UpperCAmelCase_ : Optional[Any] = torch.device(f"cuda:{gpu_id}" )
UpperCAmelCase_ : Optional[Any] = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(_UpperCamelCase , _UpperCamelCase )
def __UpperCAmelCase ( self , _UpperCamelCase=0 ) -> Union[str, Any]:
if is_accelerate_available() and is_accelerate_version('>=' , '0.17.0.dev0' ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.' )
UpperCAmelCase_ : str = torch.device(f"cuda:{gpu_id}" )
if self.device.type != "cpu":
self.to('cpu' , silence_dtype_warnings=_UpperCamelCase )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
UpperCAmelCase_ : Dict = None
for cpu_offloaded_model in [self.unet, self.movq]:
UpperCAmelCase_ , UpperCAmelCase_ : Dict = cpu_offload_with_hook(_UpperCamelCase , _UpperCamelCase , prev_module_hook=_UpperCamelCase )
# We'll offload the last model manually.
UpperCAmelCase_ : Any = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def __UpperCAmelCase ( self ) -> Dict:
if not hasattr(self.unet , '_hf_hook' ):
return self.device
for module in self.unet.modules():
if (
hasattr(_UpperCamelCase , '_hf_hook' )
and hasattr(module._hf_hook , 'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(_UpperCamelCase )
def __call__( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = 5_1_2 , _UpperCamelCase = 5_1_2 , _UpperCamelCase = 1_0_0 , _UpperCamelCase = 4.0 , _UpperCamelCase = 0.3 , _UpperCamelCase = 1 , _UpperCamelCase = None , _UpperCamelCase = "pil" , _UpperCamelCase = True , ) -> str:
UpperCAmelCase_ : Any = self._execution_device
UpperCAmelCase_ : Union[str, Any] = guidance_scale > 1.0
if isinstance(_UpperCamelCase , _UpperCamelCase ):
UpperCAmelCase_ : str = torch.cat(_UpperCamelCase , dim=0 )
UpperCAmelCase_ : Optional[Any] = image_embeds.shape[0]
if isinstance(_UpperCamelCase , _UpperCamelCase ):
UpperCAmelCase_ : Union[str, Any] = torch.cat(_UpperCamelCase , dim=0 )
if do_classifier_free_guidance:
UpperCAmelCase_ : int = image_embeds.repeat_interleave(_UpperCamelCase , dim=0 )
UpperCAmelCase_ : int = negative_image_embeds.repeat_interleave(_UpperCamelCase , dim=0 )
UpperCAmelCase_ : Optional[Any] = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=_UpperCamelCase )
if not isinstance(_UpperCamelCase , _UpperCamelCase ):
UpperCAmelCase_ : Tuple = [image]
if not all(isinstance(_UpperCamelCase , (PIL.Image.Image, torch.Tensor) ) for i in image ):
raise ValueError(
f"Input is in incorrect format: {[type(_UpperCamelCase ) for i in image]}. Currently, we only support PIL image and pytorch tensor" )
UpperCAmelCase_ : str = torch.cat([prepare_image(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) for i in image] , dim=0 )
UpperCAmelCase_ : Any = image.to(dtype=image_embeds.dtype , device=_UpperCamelCase )
UpperCAmelCase_ : List[str] = self.movq.encode(_UpperCamelCase )['latents']
UpperCAmelCase_ : List[Any] = latents.repeat_interleave(_UpperCamelCase , dim=0 )
self.scheduler.set_timesteps(_UpperCamelCase , device=_UpperCamelCase )
UpperCAmelCase_ , UpperCAmelCase_ : Any = self.get_timesteps(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
UpperCAmelCase_ : Optional[Any] = timesteps[:1].repeat(batch_size * num_images_per_prompt )
UpperCAmelCase_ , UpperCAmelCase_ : str = downscale_height_and_width(_UpperCamelCase , _UpperCamelCase , self.movq_scale_factor )
UpperCAmelCase_ : Dict = self.prepare_latents(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , image_embeds.dtype , _UpperCamelCase , _UpperCamelCase )
for i, t in enumerate(self.progress_bar(_UpperCamelCase ) ):
# expand the latents if we are doing classifier free guidance
UpperCAmelCase_ : Optional[Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
UpperCAmelCase_ : str = {'image_embeds': image_embeds}
UpperCAmelCase_ : Union[str, Any] = self.unet(
sample=_UpperCamelCase , timestep=_UpperCamelCase , encoder_hidden_states=_UpperCamelCase , added_cond_kwargs=_UpperCamelCase , return_dict=_UpperCamelCase , )[0]
if do_classifier_free_guidance:
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = noise_pred.split(latents.shape[1] , dim=1 )
UpperCAmelCase_ , UpperCAmelCase_ : str = noise_pred.chunk(2 )
UpperCAmelCase_ , UpperCAmelCase_ : str = variance_pred.chunk(2 )
UpperCAmelCase_ : Dict = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
UpperCAmelCase_ : Tuple = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , 'variance_type' )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
UpperCAmelCase_ , UpperCAmelCase_ : int = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
UpperCAmelCase_ : List[str] = self.scheduler.step(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , generator=_UpperCamelCase , )[0]
# post-processing
UpperCAmelCase_ : Optional[Any] = self.movq.decode(_UpperCamelCase , force_not_quantize=_UpperCamelCase )['sample']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}" )
if output_type in ["np", "pil"]:
UpperCAmelCase_ : List[str] = image * 0.5 + 0.5
UpperCAmelCase_ : List[Any] = image.clamp(0 , 1 )
UpperCAmelCase_ : Dict = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
UpperCAmelCase_ : List[Any] = self.numpy_to_pil(_UpperCamelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_UpperCamelCase )
| 29 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__UpperCAmelCase = {
'configuration_pegasus_x': ['PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PegasusXConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST',
'PegasusXForConditionalGeneration',
'PegasusXModel',
'PegasusXPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
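# Note: the `_LazyModule` indirection above defers importing the heavy
# torch-backed modeling code until one of the exported names is first
# accessed, keeping the top-level import cheap.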
| 29 |
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def lowercase__ ( __snake_case : List[Any] , __snake_case : List[str]=False ):
'''simple docstring'''
try:
UpperCAmelCase_ : int = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
UpperCAmelCase_ : Optional[int] = default
else:
# KEY is set, convert it to True or False.
try:
UpperCAmelCase_ : List[Any] = strtobool(__snake_case )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(F"If set, {key} must be yes or no." )
return _value
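# Illustrative: with RUN_SLOW=yes exported in the environment, the flag
# below evaluates to True and `slow`-marked tests are run; when unset it
# falls back to the given default.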
__UpperCAmelCase = parse_flag_from_env('RUN_SLOW', default=False)
def lowercase__ ( __snake_case : int ):
'''simple docstring'''
return unittest.skip('Test was skipped' )(__snake_case )
def lowercase__ ( __snake_case : Tuple ):
'''simple docstring'''
return unittest.skipUnless(_run_slow_tests , 'test is slow' )(__snake_case )
def lowercase__ ( __snake_case : List[str] ):
'''simple docstring'''
return unittest.skipUnless(not torch.cuda.is_available() , 'test requires only a CPU' )(__snake_case )
def lowercase__ ( __snake_case : Tuple ):
'''simple docstring'''
return unittest.skipUnless(torch.cuda.is_available() , 'test requires a GPU' )(__snake_case )
def lowercase__ ( __snake_case : List[str] ):
'''simple docstring'''
return unittest.skipUnless(is_xpu_available() , 'test requires a XPU' )(__snake_case )
def lowercase__ ( __snake_case : str ):
'''simple docstring'''
return unittest.skipUnless(is_mps_available() , 'test requires a `mps` backend support in `torch`' )(__snake_case )
def lowercase__ ( __snake_case : Tuple ):
'''simple docstring'''
return unittest.skipUnless(
is_transformers_available() and is_datasets_available() , 'test requires the Hugging Face suite' )(__snake_case )
def lowercase__ ( __snake_case : str ):
'''simple docstring'''
return unittest.skipUnless(is_bnb_available() , 'test requires the bitsandbytes library' )(__snake_case )
def lowercase__ ( __snake_case : Dict ):
'''simple docstring'''
return unittest.skipUnless(is_tpu_available() , 'test requires TPU' )(__snake_case )
def lowercase__ ( __snake_case : Tuple ):
'''simple docstring'''
return unittest.skipUnless(torch.cuda.device_count() == 1 , 'test requires a GPU' )(__snake_case )
def lowercase__ ( __snake_case : Dict ):
'''simple docstring'''
return unittest.skipUnless(torch.xpu.device_count() == 1 , 'test requires a XPU' )(__snake_case )
def lowercase__ ( __snake_case : Optional[int] ):
'''simple docstring'''
return unittest.skipUnless(torch.cuda.device_count() > 1 , 'test requires multiple GPUs' )(__snake_case )
def lowercase__ ( __snake_case : int ):
'''simple docstring'''
return unittest.skipUnless(torch.xpu.device_count() > 1 , 'test requires multiple XPUs' )(__snake_case )
def lowercase__ ( __snake_case : Dict ):
'''simple docstring'''
return unittest.skipUnless(is_safetensors_available() , 'test requires safetensors' )(__snake_case )
def lowercase__ ( __snake_case : Tuple ):
'''simple docstring'''
return unittest.skipUnless(is_deepspeed_available() , 'test requires DeepSpeed' )(__snake_case )
def lowercase__ ( __snake_case : List[Any] ):
'''simple docstring'''
return unittest.skipUnless(is_torch_version('>=' , '1.12.0' ) , 'test requires torch version >= 1.12.0' )(__snake_case )
def lowercase__ ( __snake_case : Dict=None , __snake_case : Dict=None ):
'''simple docstring'''
if test_case is None:
return partial(__snake_case , version=__snake_case )
return unittest.skipUnless(is_torch_version('>=' , __snake_case ) , F"test requires torch version >= {version}" )(__snake_case )
def lowercase__ ( __snake_case : str ):
'''simple docstring'''
return unittest.skipUnless(is_tensorboard_available() , 'test requires Tensorboard' )(__snake_case )
def lowercase__ ( __snake_case : List[str] ):
'''simple docstring'''
return unittest.skipUnless(is_wandb_available() , 'test requires wandb' )(__snake_case )
def lowercase__ ( __snake_case : str ):
'''simple docstring'''
return unittest.skipUnless(is_comet_ml_available() , 'test requires comet_ml' )(__snake_case )
__UpperCAmelCase = (
any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)
def lowercase__ ( __snake_case : List[Any] ):
'''simple docstring'''
return unittest.skipUnless(
_atleast_one_tracker_available , 'test requires at least one tracker to be available and for `comet_ml` to not be installed' , )(__snake_case )
class lowerCamelCase (unittest.TestCase ):
'''simple docstring'''
_snake_case : Union[str, Any] = True
@classmethod
def __UpperCAmelCase ( cls ) -> Union[str, Any]:
UpperCAmelCase_ : List[Any] = tempfile.mkdtemp()
@classmethod
def __UpperCAmelCase ( cls ) -> List[str]:
if os.path.exists(cls.tmpdir ):
shutil.rmtree(cls.tmpdir )
def __UpperCAmelCase ( self ) -> str:
if self.clear_on_setup:
for path in Path(self.tmpdir ).glob('**/*' ):
if path.is_file():
path.unlink()
elif path.is_dir():
shutil.rmtree(_UpperCamelCase )
class lowerCamelCase (unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self ) -> Optional[int]:
super().tearDown()
# Reset the state of the AcceleratorState singleton.
AcceleratorState._reset_state()
PartialState._reset_state()
class lowerCamelCase (unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self , _UpperCamelCase ) -> Any:
UpperCAmelCase_ : List[Any] = mocks if isinstance(_UpperCamelCase , (tuple, list) ) else [mocks]
for m in self.mocks:
m.start()
self.addCleanup(m.stop )
def lowercase__ ( __snake_case : int ):
'''simple docstring'''
UpperCAmelCase_ : int = AcceleratorState()
UpperCAmelCase_ : str = tensor[None].clone().to(state.device )
UpperCAmelCase_ : List[str] = gather(__snake_case ).cpu()
UpperCAmelCase_ : List[Any] = tensor[0].cpu()
for i in range(tensors.shape[0] ):
if not torch.equal(tensors[i] , __snake_case ):
return False
return True
class lowerCamelCase :
'''simple docstring'''
def __init__( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Any:
UpperCAmelCase_ : str = returncode
UpperCAmelCase_ : Optional[Any] = stdout
UpperCAmelCase_ : Optional[Any] = stderr
async def lowercase__ ( __snake_case : Optional[Any] , __snake_case : Optional[int] ):
'''simple docstring'''
while True:
UpperCAmelCase_ : Dict = await stream.readline()
if line:
callback(__snake_case )
else:
break
async def lowercase__ ( __snake_case : Optional[int] , __snake_case : Dict=None , __snake_case : str=None , __snake_case : Dict=None , __snake_case : List[str]=False , __snake_case : Optional[int]=False ):
'''simple docstring'''
if echo:
print('\nRunning: ' , ' '.join(__snake_case ) )
UpperCAmelCase_ : Optional[Any] = await asyncio.create_subprocess_exec(
cmd[0] , *cmd[1:] , stdin=__snake_case , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=__snake_case , )
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
UpperCAmelCase_ : Any = []
UpperCAmelCase_ : str = []
def tee(__snake_case : Dict , __snake_case : Union[str, Any] , __snake_case : Tuple , __snake_case : Optional[int]="" ):
UpperCAmelCase_ : List[str] = line.decode('utf-8' ).rstrip()
sink.append(__snake_case )
if not quiet:
print(__snake_case , __snake_case , file=__snake_case )
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
asyncio.create_task(_read_stream(p.stdout , lambda __snake_case : tee(__snake_case , __snake_case , sys.stdout , label='stdout:' ) ) ),
asyncio.create_task(_read_stream(p.stderr , lambda __snake_case : tee(__snake_case , __snake_case , sys.stderr , label='stderr:' ) ) ),
] , timeout=__snake_case , )
return _RunOutput(await p.wait() , __snake_case , __snake_case )
def lowercase__ ( __snake_case : Optional[Any] , __snake_case : List[Any]=None , __snake_case : str=None , __snake_case : Tuple=180 , __snake_case : Dict=False , __snake_case : Optional[Any]=True ):
'''simple docstring'''
UpperCAmelCase_ : str = asyncio.get_event_loop()
UpperCAmelCase_ : int = loop.run_until_complete(
_stream_subprocess(__snake_case , env=__snake_case , stdin=__snake_case , timeout=__snake_case , quiet=__snake_case , echo=__snake_case ) )
UpperCAmelCase_ : int = ' '.join(__snake_case )
if result.returncode > 0:
UpperCAmelCase_ : int = '\n'.join(result.stderr )
raise RuntimeError(
F"'{cmd_str}' failed with returncode {result.returncode}\n\n"
F"The combined stderr from workers follows:\n{stderr}" )
return result
class lowerCamelCase (_snake_case ):
'''simple docstring'''
pass
def lowercase__ ( __snake_case : List[str] , __snake_case : List[Any]=False ):
'''simple docstring'''
try:
UpperCAmelCase_ : List[Any] = subprocess.check_output(__snake_case , stderr=subprocess.STDOUT )
if return_stdout:
if hasattr(__snake_case , 'decode' ):
UpperCAmelCase_ : str = output.decode('utf-8' )
return output
except subprocess.CalledProcessError as e:
raise SubprocessCallException(
F"Command `{' '.join(__snake_case )}` failed with the following error:\n\n{e.output.decode()}" ) from e
| 29 | 1 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
'CarlCochet/trajectory-transformer-halfcheetah-medium-v2': (
'https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json'
),
# See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}
class lowerCamelCase (_snake_case ):
'''simple docstring'''
_snake_case : Tuple = '''trajectory_transformer'''
_snake_case : Tuple = ['''past_key_values''']
_snake_case : int = {
'''hidden_size''': '''n_embd''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self , _UpperCamelCase=1_0_0 , _UpperCamelCase=5 , _UpperCamelCase=1 , _UpperCamelCase=1 , _UpperCamelCase=2_4_9 , _UpperCamelCase=6 , _UpperCamelCase=1_7 , _UpperCamelCase=2_5 , _UpperCamelCase=4 , _UpperCamelCase=4 , _UpperCamelCase=1_2_8 , _UpperCamelCase=0.1 , _UpperCamelCase=0.1 , _UpperCamelCase=0.1 , _UpperCamelCase=0.00_06 , _UpperCamelCase=5_1_2 , _UpperCamelCase=0.02 , _UpperCamelCase=1E-12 , _UpperCamelCase=1 , _UpperCamelCase=True , _UpperCamelCase=1 , _UpperCamelCase=5_0_2_5_6 , _UpperCamelCase=5_0_2_5_6 , **_UpperCamelCase , ) -> int:
UpperCAmelCase_ : Dict = vocab_size
UpperCAmelCase_ : Dict = action_weight
UpperCAmelCase_ : str = reward_weight
UpperCAmelCase_ : Any = value_weight
UpperCAmelCase_ : Dict = max_position_embeddings
UpperCAmelCase_ : str = block_size
UpperCAmelCase_ : Any = action_dim
UpperCAmelCase_ : int = observation_dim
UpperCAmelCase_ : Dict = transition_dim
UpperCAmelCase_ : Tuple = learning_rate
UpperCAmelCase_ : Dict = n_layer
UpperCAmelCase_ : Dict = n_head
UpperCAmelCase_ : Union[str, Any] = n_embd
UpperCAmelCase_ : Optional[int] = embd_pdrop
UpperCAmelCase_ : Optional[Any] = attn_pdrop
UpperCAmelCase_ : Any = resid_pdrop
UpperCAmelCase_ : int = initializer_range
UpperCAmelCase_ : Optional[int] = layer_norm_eps
UpperCAmelCase_ : Optional[int] = kaiming_initializer_range
UpperCAmelCase_ : Optional[int] = use_cache
super().__init__(pad_token_id=_UpperCamelCase , bos_token_id=_UpperCamelCase , eos_token_id=_UpperCamelCase , **_UpperCamelCase )
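# Note: the attribute map above lets callers read `config.hidden_size`,
# `config.num_attention_heads`, and `config.num_hidden_layers` as aliases
# for `n_embd`, `n_head`, and `n_layer` respectively.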
| 29 |
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
__UpperCAmelCase = logging.getLogger(__name__)
def lowercase__ ( __snake_case : List[Any]=2 , __snake_case : Union[str, Any]=3 , __snake_case : Any=16 , __snake_case : int = 10 , __snake_case : int = 2 ):
'''simple docstring'''
def get_dataset(__snake_case : Optional[Any] ):
UpperCAmelCase_ : Optional[Any] = torch.randn(batch_size * n_batches , 1 )
return TensorDataset(__snake_case , a * x + b + 0.1 * torch.randn(batch_size * n_batches , 1 ) )
UpperCAmelCase_ : Any = get_dataset(__snake_case )
UpperCAmelCase_ : str = get_dataset(__snake_case )
UpperCAmelCase_ : int = DataLoader(__snake_case , shuffle=__snake_case , batch_size=__snake_case , num_workers=4 )
UpperCAmelCase_ : int = DataLoader(__snake_case , shuffle=__snake_case , batch_size=__snake_case , num_workers=4 )
return (train_dataloader, valid_dataloader)
def lowercase__ ( __snake_case : Optional[int] , __snake_case : str , __snake_case : Optional[int] , __snake_case : List[str] , __snake_case : Any , __snake_case : Tuple=None ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = []
for epoch in range(__snake_case ):
# Train quickly
model.train()
for batch in dataloader:
UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = batch
UpperCAmelCase_ : List[Any] = model(__snake_case )
UpperCAmelCase_ : int = torch.nn.functional.mse_loss(__snake_case , __snake_case )
accelerator.backward(__snake_case )
optimizer.step()
optimizer.zero_grad()
rands.append(random.random() ) # Introduce some randomness
if scheduler is not None:
scheduler.step()
return rands
class lowerCamelCase (nn.Module ):
'''simple docstring'''
def __init__( self ) -> Optional[Any]:
super().__init__()
UpperCAmelCase_ : List[Any] = nn.Parameter(torch.randn(1 ) )
UpperCAmelCase_ : Optional[int] = nn.Parameter(torch.randn(1 ) )
def __UpperCAmelCase ( self , _UpperCamelCase ) -> Optional[Any]:
return x * self.a + self.b
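# Note: the model above is a scalar linear map y = a * x + b; the synthetic
# dataloaders target a * x + b + noise, so checkpoint round-trips can be
# verified by comparing the learned scalars a and b.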
class lowerCamelCase (unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self ) -> Dict:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
UpperCAmelCase_ : Tuple = DummyModel()
UpperCAmelCase_ : List[str] = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = dummy_dataloaders()
UpperCAmelCase_ : Optional[int] = ProjectConfiguration(total_limit=1 , project_dir=_UpperCamelCase , automatic_checkpoint_naming=_UpperCamelCase )
# Train baseline
UpperCAmelCase_ : Dict = Accelerator(project_config=_UpperCamelCase )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : str = accelerator.prepare(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# Save initial
accelerator.save_state()
# Save second state
accelerator.save_state()
self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 )
def __UpperCAmelCase ( self ) -> int:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
UpperCAmelCase_ : Optional[Any] = DummyModel()
UpperCAmelCase_ : str = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = dummy_dataloaders()
# Train baseline
UpperCAmelCase_ : Tuple = Accelerator()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = accelerator.prepare(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# Save initial
UpperCAmelCase_ : Any = os.path.join(_UpperCamelCase , 'initial' )
accelerator.save_state(_UpperCamelCase )
((UpperCAmelCase_) , (UpperCAmelCase_)) : Optional[int] = model.a.item(), model.b.item()
UpperCAmelCase_ : Dict = optimizer.state_dict()
UpperCAmelCase_ : Union[str, Any] = train(3 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
((UpperCAmelCase_) , (UpperCAmelCase_)) : Union[str, Any] = model.a.item(), model.b.item()
UpperCAmelCase_ : Any = optimizer.state_dict()
# Train partially
set_seed(4_2 )
UpperCAmelCase_ : int = DummyModel()
UpperCAmelCase_ : int = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
UpperCAmelCase_ , UpperCAmelCase_ : str = dummy_dataloaders()
UpperCAmelCase_ : Optional[Any] = Accelerator()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Tuple = accelerator.prepare(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
accelerator.load_state(_UpperCamelCase )
((UpperCAmelCase_) , (UpperCAmelCase_)) : List[str] = model.a.item(), model.b.item()
UpperCAmelCase_ : Optional[Any] = optimizer.state_dict()
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
UpperCAmelCase_ : Dict = train(2 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# Save everything
UpperCAmelCase_ : Union[str, Any] = os.path.join(_UpperCamelCase , 'checkpoint' )
accelerator.save_state(_UpperCamelCase )
# Load everything back in and make sure all states work
accelerator.load_state(_UpperCamelCase )
test_rands += train(1 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
((UpperCAmelCase_) , (UpperCAmelCase_)) : Optional[Any] = model.a.item(), model.b.item()
UpperCAmelCase_ : Union[str, Any] = optimizer.state_dict()
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
def __UpperCAmelCase ( self ) -> int:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
UpperCAmelCase_ : Tuple = DummyModel()
UpperCAmelCase_ : Optional[int] = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = dummy_dataloaders()
UpperCAmelCase_ : Any = ProjectConfiguration(automatic_checkpoint_naming=_UpperCamelCase )
# Train baseline
UpperCAmelCase_ : str = Accelerator(project_dir=_UpperCamelCase , project_config=_UpperCamelCase )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Any = accelerator.prepare(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# Save initial
accelerator.save_state()
((UpperCAmelCase_) , (UpperCAmelCase_)) : Optional[int] = model.a.item(), model.b.item()
UpperCAmelCase_ : Optional[int] = optimizer.state_dict()
UpperCAmelCase_ : Optional[Any] = train(3 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
((UpperCAmelCase_) , (UpperCAmelCase_)) : Tuple = model.a.item(), model.b.item()
UpperCAmelCase_ : Optional[int] = optimizer.state_dict()
# Train partially
set_seed(4_2 )
UpperCAmelCase_ : Any = DummyModel()
UpperCAmelCase_ : Any = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = dummy_dataloaders()
UpperCAmelCase_ : Tuple = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=_UpperCamelCase )
UpperCAmelCase_ : List[Any] = Accelerator(project_dir=_UpperCamelCase , project_config=_UpperCamelCase )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Any = accelerator.prepare(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
accelerator.load_state(os.path.join(_UpperCamelCase , 'checkpoints' , 'checkpoint_0' ) )
((UpperCAmelCase_) , (UpperCAmelCase_)) : str = model.a.item(), model.b.item()
UpperCAmelCase_ : List[Any] = optimizer.state_dict()
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
UpperCAmelCase_ : Union[str, Any] = train(2 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# Save everything
accelerator.save_state()
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(_UpperCamelCase , 'checkpoints' , 'checkpoint_1' ) )
test_rands += train(1 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
((UpperCAmelCase_) , (UpperCAmelCase_)) : List[Any] = model.a.item(), model.b.item()
UpperCAmelCase_ : Dict = optimizer.state_dict()
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
def __UpperCAmelCase ( self ) -> Dict:
UpperCAmelCase_ : Optional[Any] = torch.tensor([1, 2, 3] )
UpperCAmelCase_ : Any = torch.tensor([2, 3, 4] )
UpperCAmelCase_ : Union[str, Any] = DummyModel()
UpperCAmelCase_ : List[str] = torch.optim.Adam(net.parameters() )
UpperCAmelCase_ : Any = Accelerator()
with self.assertRaises(_UpperCamelCase ) as ve:
accelerator.register_for_checkpointing(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
UpperCAmelCase_ : Optional[int] = str(ve.exception )
self.assertTrue('Item at index 0' in message )
self.assertTrue('Item at index 1' in message )
self.assertFalse('Item at index 2' in message )
self.assertFalse('Item at index 3' in message )
def __UpperCAmelCase ( self ) -> int:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
UpperCAmelCase_ : int = DummyModel()
UpperCAmelCase_ : Any = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
UpperCAmelCase_ : Dict = torch.optim.lr_scheduler.StepLR(_UpperCamelCase , step_size=1 , gamma=0.99 )
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = dummy_dataloaders()
UpperCAmelCase_ : Tuple = ProjectConfiguration(automatic_checkpoint_naming=_UpperCamelCase )
# Train baseline
UpperCAmelCase_ : Tuple = Accelerator(project_dir=_UpperCamelCase , project_config=_UpperCamelCase )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Any = accelerator.prepare(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# Save initial
accelerator.save_state()
UpperCAmelCase_ : Dict = scheduler.state_dict()
train(3 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
self.assertNotEqual(_UpperCamelCase , scheduler.state_dict() )
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(_UpperCamelCase , 'checkpoints' , 'checkpoint_0' ) )
self.assertEqual(_UpperCamelCase , scheduler.state_dict() )
def __UpperCAmelCase ( self ) -> Dict:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
UpperCAmelCase_ : Optional[int] = DummyModel()
UpperCAmelCase_ : Dict = ProjectConfiguration(automatic_checkpoint_naming=_UpperCamelCase , total_limit=2 )
# Train baseline
UpperCAmelCase_ : Optional[int] = Accelerator(project_dir=_UpperCamelCase , project_config=_UpperCamelCase )
UpperCAmelCase_ : str = accelerator.prepare(_UpperCamelCase )
# Save 11 states; with total_limit=2 only the last two checkpoints survive:
for _ in range(1_1 ):
accelerator.save_state()
self.assertTrue(not os.path.exists(os.path.join(_UpperCamelCase , 'checkpoints' , 'checkpoint_0' ) ) )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase , 'checkpoints' , 'checkpoint_9' ) ) )
self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase , 'checkpoints' , 'checkpoint_10' ) ) )
@require_cuda
def __UpperCAmelCase ( self ) -> str:
UpperCAmelCase_ : List[str] = ['torchrun', f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__ )]
execute_subprocess_async(_UpperCamelCase , env=os.environ.copy() )
if __name__ == "__main__":
__UpperCAmelCase = '/tmp/accelerate/state_checkpointing'
__UpperCAmelCase = DummyModel()
__UpperCAmelCase = torch.optim.Adam(params=model.parameters(), lr=1E-3)
__UpperCAmelCase = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.9_9)
__UpperCAmelCase , __UpperCAmelCase = dummy_dataloaders()
__UpperCAmelCase = ProjectConfiguration(automatic_checkpoint_naming=True)
# Train baseline
__UpperCAmelCase = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision='no')
if accelerator.process_index == 0:
if os.path.exists(savedir):
shutil.rmtree(savedir)
os.makedirs(savedir)
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = accelerator.prepare(
model, optimizer, train_dataloader, valid_dataloader, scheduler
)
__UpperCAmelCase , __UpperCAmelCase = accelerator.prepare(model, optimizer)
train(3, model, train_dataloader, optimizer, accelerator, scheduler)
# Check that the initial optimizer is loaded on the GPU
for group in optimizer.param_groups:
__UpperCAmelCase = group['params'][0].device
break
assert param_device.type == accelerator.device.type
__UpperCAmelCase = model.cpu()
accelerator.wait_for_everyone()
accelerator.save_state()
accelerator.wait_for_everyone()
# Check CPU state
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='cpu')
for group in optimizer.param_groups:
__UpperCAmelCase = group['params'][0].device
break
assert (
param_device.type == torch.device('cpu').type
), F"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
# Check device state
model.to(accelerator.device)
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='on_device')
for group in optimizer.param_groups:
__UpperCAmelCase = group['params'][0].device
break
assert (
param_device.type == accelerator.device.type
), F"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
# Check error
with pytest.raises(TypeError, match='Unsupported optimizer map location passed'):
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='invalid')
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
shutil.rmtree(savedir)
accelerator.wait_for_everyone()
| 29 | 1 |
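# A minimal, hypothetical sketch of the checkpoint round trip the tests above
# exercise: save Accelerator state, perturb the model, then restore and verify.
# The Linear module and the weight perturbation are illustrative assumptions,
# not part of the test suite itself.
import os
import tempfile
import torch
from accelerate import Accelerator
from accelerate.utils import ProjectConfiguration, set_seed

set_seed(42)
model = torch.nn.Linear(1, 1)
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
with tempfile.TemporaryDirectory() as tmpdir:
    config = ProjectConfiguration(automatic_checkpoint_naming=True)
    accelerator = Accelerator(project_dir=tmpdir, project_config=config)
    model, optimizer = accelerator.prepare(model, optimizer)
    accelerator.save_state()  # written to tmpdir/checkpoints/checkpoint_0
    before = model.weight.item()
    with torch.no_grad():
        model.weight.add_(1.0)  # simulate further training drift
    accelerator.load_state(os.path.join(tmpdir, "checkpoints", "checkpoint_0"))
    assert model.weight.item() == before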
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder
class lowerCamelCase :
'''simple docstring'''
def __init__( self , _UpperCamelCase , _UpperCamelCase=1_3 , _UpperCamelCase=7 , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase=9_9 , _UpperCamelCase=3_2 , _UpperCamelCase=5 , _UpperCamelCase=4 , _UpperCamelCase=3_7 , _UpperCamelCase="gelu" , _UpperCamelCase=0.1 , _UpperCamelCase=0.1 , _UpperCamelCase=5_0 , _UpperCamelCase=0.02 , _UpperCamelCase=True , _UpperCamelCase=None , ) -> List[str]:
UpperCAmelCase_ : Union[str, Any] = parent
UpperCAmelCase_ : Any = batch_size
UpperCAmelCase_ : str = seq_length
UpperCAmelCase_ : Optional[int] = is_training
UpperCAmelCase_ : Optional[int] = use_input_mask
UpperCAmelCase_ : str = vocab_size
UpperCAmelCase_ : int = hidden_size
UpperCAmelCase_ : Tuple = num_hidden_layers
UpperCAmelCase_ : str = num_attention_heads
UpperCAmelCase_ : List[str] = intermediate_size
UpperCAmelCase_ : List[str] = hidden_act
UpperCAmelCase_ : Optional[int] = hidden_dropout_prob
UpperCAmelCase_ : str = attention_probs_dropout_prob
UpperCAmelCase_ : Optional[int] = max_position_embeddings
UpperCAmelCase_ : Optional[int] = initializer_range
UpperCAmelCase_ : Optional[Any] = use_labels
UpperCAmelCase_ : str = scope
def __UpperCAmelCase ( self ) -> Optional[int]:
UpperCAmelCase_ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase_ : Optional[Any] = None
if self.use_input_mask:
UpperCAmelCase_ : Dict = random_attention_mask([self.batch_size, self.seq_length] )
if self.use_labels:
UpperCAmelCase_ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase_ : List[Any] = self.get_config()
return config, input_ids, input_mask, token_labels
def __UpperCAmelCase ( self ) -> Dict:
return BertGenerationConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , is_decoder=_UpperCamelCase , initializer_range=self.initializer_range , )
def __UpperCAmelCase ( self ) -> Tuple:
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = self.prepare_config_and_inputs()
UpperCAmelCase_ : Optional[Any] = True
UpperCAmelCase_ : Optional[int] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
UpperCAmelCase_ : Dict = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
input_mask,
token_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , **_UpperCamelCase , ) -> Optional[Any]:
UpperCAmelCase_ : List[Any] = BertGenerationEncoder(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
UpperCAmelCase_ : Tuple = model(_UpperCamelCase , attention_mask=_UpperCamelCase )
UpperCAmelCase_ : str = model(_UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , **_UpperCamelCase , ) -> Tuple:
UpperCAmelCase_ : Union[str, Any] = True
UpperCAmelCase_ : Optional[Any] = BertGenerationEncoder(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
UpperCAmelCase_ : Tuple = model(
_UpperCamelCase , attention_mask=_UpperCamelCase , encoder_hidden_states=_UpperCamelCase , encoder_attention_mask=_UpperCamelCase , )
UpperCAmelCase_ : str = model(
_UpperCamelCase , attention_mask=_UpperCamelCase , encoder_hidden_states=_UpperCamelCase , )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , **_UpperCamelCase , ) -> Any:
UpperCAmelCase_ : List[str] = True
UpperCAmelCase_ : Union[str, Any] = True
UpperCAmelCase_ : Optional[int] = BertGenerationDecoder(config=_UpperCamelCase ).to(_UpperCamelCase ).eval()
# first forward pass
UpperCAmelCase_ : List[str] = model(
_UpperCamelCase , attention_mask=_UpperCamelCase , encoder_hidden_states=_UpperCamelCase , encoder_attention_mask=_UpperCamelCase , use_cache=_UpperCamelCase , )
UpperCAmelCase_ : List[str] = outputs.past_key_values
# create hypothetical next tokens and extend next_input_ids
UpperCAmelCase_ : int = ids_tensor((self.batch_size, 3) , config.vocab_size )
UpperCAmelCase_ : List[Any] = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append the new tokens to next input_ids and next attention mask
UpperCAmelCase_ : Dict = torch.cat([input_ids, next_tokens] , dim=-1 )
UpperCAmelCase_ : Optional[Any] = torch.cat([input_mask, next_mask] , dim=-1 )
UpperCAmelCase_ : List[str] = model(
_UpperCamelCase , attention_mask=_UpperCamelCase , encoder_hidden_states=_UpperCamelCase , encoder_attention_mask=_UpperCamelCase , output_hidden_states=_UpperCamelCase , )['hidden_states'][0]
UpperCAmelCase_ : List[Any] = model(
_UpperCamelCase , attention_mask=_UpperCamelCase , encoder_hidden_states=_UpperCamelCase , encoder_attention_mask=_UpperCamelCase , past_key_values=_UpperCamelCase , output_hidden_states=_UpperCamelCase , )['hidden_states'][0]
# select random slice
UpperCAmelCase_ : Union[str, Any] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
UpperCAmelCase_ : Optional[Any] = output_from_no_past[:, -3:, random_slice_idx].detach()
UpperCAmelCase_ : Any = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_UpperCamelCase , _UpperCamelCase , atol=1E-3 ) )
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , *_UpperCamelCase , ) -> Union[str, Any]:
UpperCAmelCase_ : int = BertGenerationDecoder(_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
UpperCAmelCase_ : Tuple = model(_UpperCamelCase , attention_mask=_UpperCamelCase , labels=_UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __UpperCAmelCase ( self ) -> Tuple:
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = self.prepare_config_and_inputs()
UpperCAmelCase_ : List[str] = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class lowerCamelCase (_snake_case , _snake_case , _snake_case , unittest.TestCase ):
'''simple docstring'''
_snake_case : str = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
_snake_case : List[Any] = (BertGenerationDecoder,) if is_torch_available() else ()
_snake_case : int = (
{'''feature-extraction''': BertGenerationEncoder, '''text-generation''': BertGenerationDecoder}
if is_torch_available()
else {}
)
def __UpperCAmelCase ( self ) -> Dict:
UpperCAmelCase_ : Tuple = BertGenerationEncoderTester(self )
UpperCAmelCase_ : Tuple = ConfigTester(self , config_class=_UpperCamelCase , hidden_size=3_7 )
def __UpperCAmelCase ( self ) -> List[Any]:
self.config_tester.run_common_tests()
def __UpperCAmelCase ( self ) -> List[str]:
UpperCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCamelCase )
def __UpperCAmelCase ( self ) -> Tuple:
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs()
UpperCAmelCase_ : Tuple = 'bert'
self.model_tester.create_and_check_model(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
def __UpperCAmelCase ( self ) -> Optional[Any]:
UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*_UpperCamelCase )
def __UpperCAmelCase ( self ) -> List[Any]:
UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*_UpperCamelCase )
def __UpperCAmelCase ( self ) -> Union[str, Any]:
# This regression test was failing with PyTorch < 1.3
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_decoder()
UpperCAmelCase_ : str = None
self.model_tester.create_and_check_model_as_decoder(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , )
def __UpperCAmelCase ( self ) -> int:
UpperCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_for_causal_lm(*_UpperCamelCase )
@slow
def __UpperCAmelCase ( self ) -> Union[str, Any]:
UpperCAmelCase_ : Tuple = BertGenerationEncoder.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder' )
self.assertIsNotNone(_UpperCamelCase )
@require_torch
class lowerCamelCase (unittest.TestCase ):
'''simple docstring'''
@slow
def __UpperCAmelCase ( self ) -> List[str]:
UpperCAmelCase_ : Any = BertGenerationEncoder.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder' )
UpperCAmelCase_ : Optional[int] = torch.tensor([[1_0_1, 7_5_9_2, 1_0_1_0, 2_0_2_6, 3_8_9_9, 2_0_0_3, 1_0_1_4_0, 1_0_2]] )
with torch.no_grad():
UpperCAmelCase_ : Dict = model(_UpperCamelCase )[0]
UpperCAmelCase_ : Dict = torch.Size([1, 8, 1_0_2_4] )
self.assertEqual(output.shape , _UpperCamelCase )
UpperCAmelCase_ : Dict = torch.tensor(
[[[0.17_75, 0.00_83, -0.03_21], [1.60_02, 0.12_87, 0.39_12], [2.14_73, 0.57_91, 0.60_66]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , _UpperCamelCase , atol=1E-4 ) )
@require_torch
class lowerCamelCase (unittest.TestCase ):
'''simple docstring'''
@slow
def __UpperCAmelCase ( self ) -> Optional[int]:
UpperCAmelCase_ : str = BertGenerationDecoder.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder' )
UpperCAmelCase_ : Union[str, Any] = torch.tensor([[1_0_1, 7_5_9_2, 1_0_1_0, 2_0_2_6, 3_8_9_9, 2_0_0_3, 1_0_1_4_0, 1_0_2]] )
with torch.no_grad():
UpperCAmelCase_ : str = model(_UpperCamelCase )[0]
UpperCAmelCase_ : str = torch.Size([1, 8, 5_0_3_5_8] )
self.assertEqual(output.shape , _UpperCamelCase )
UpperCAmelCase_ : Dict = torch.tensor(
[[[-0.57_88, -2.59_94, -3.70_54], [0.04_38, 4.79_97, 1.87_95], [1.58_62, 6.64_09, 4.46_38]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , _UpperCamelCase , atol=1E-4 ) )
| 29 |
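# A compact, hedged sketch of the cached-decoding check above: run the decoder
# once with use_cache=True, then feed only the new token plus past_key_values
# and verify the logits agree with a full forward pass. The tiny config values
# are illustrative assumptions.
import torch
from transformers import BertGenerationConfig, BertGenerationDecoder

config = BertGenerationConfig(
    vocab_size=99, hidden_size=32, num_hidden_layers=2,
    num_attention_heads=4, intermediate_size=37, is_decoder=True,
)
model = BertGenerationDecoder(config).eval()
input_ids = torch.randint(0, 99, (1, 7))
next_token = torch.randint(0, 99, (1, 1))
with torch.no_grad():
    past = model(input_ids, use_cache=True).past_key_values
    full = model(torch.cat([input_ids, next_token], dim=-1)).logits[:, -1]
    cached = model(next_token, past_key_values=past).logits[:, -1]
assert torch.allclose(full, cached, atol=1e-3)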
import warnings
from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor
__UpperCAmelCase = logging.get_logger(__name__)
class lowerCamelCase (_snake_case ):
'''simple docstring'''
def __init__( self , *_UpperCamelCase , **_UpperCamelCase ) -> None:
warnings.warn(
'The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use ImageGPTImageProcessor instead.' , _UpperCamelCase , )
super().__init__(*_UpperCamelCase , **_UpperCamelCase )
| 29 | 1 |
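# Illustrative only: instantiating the deprecated shim above should emit the
# warning it defines while behaving exactly like ImageGPTImageProcessor, of
# which it is a subclass. A minimal sketch:
import warnings
from transformers import ImageGPTFeatureExtractor, ImageGPTImageProcessor

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    extractor = ImageGPTFeatureExtractor()
assert any("deprecated" in str(w.message) for w in caught)
assert isinstance(extractor, ImageGPTImageProcessor)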
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, WavaVecaFeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / 'utils'))
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
__UpperCAmelCase = get_tests_dir('fixtures')
class lowerCamelCase (unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self ) -> int:
# A mock response for an HTTP head request to emulate server down
UpperCAmelCase_ : Optional[int] = mock.Mock()
UpperCAmelCase_ : Any = 5_0_0
UpperCAmelCase_ : str = {}
UpperCAmelCase_ : Any = HTTPError
UpperCAmelCase_ : List[str] = {}
# Download this model to make sure it's in the cache.
UpperCAmelCase_ : Union[str, Any] = WavaVecaFeatureExtractor.from_pretrained('hf-internal-testing/tiny-random-wav2vec2' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('requests.Session.request' , return_value=_UpperCamelCase ) as mock_head:
UpperCAmelCase_ : Tuple = WavaVecaFeatureExtractor.from_pretrained('hf-internal-testing/tiny-random-wav2vec2' )
# This check ensures we did call the fake head request
mock_head.assert_called()
def __UpperCAmelCase ( self ) -> Tuple:
# This test is for deprecated behavior and can be removed in v5
UpperCAmelCase_ : int = WavaVecaFeatureExtractor.from_pretrained(
'https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json' )
@is_staging_test
class lowerCamelCase (unittest.TestCase ):
'''simple docstring'''
@classmethod
def __UpperCAmelCase ( cls ) -> Tuple:
UpperCAmelCase_ : Any = TOKEN
HfFolder.save_token(_UpperCamelCase )
@classmethod
def __UpperCAmelCase ( cls ) -> Optional[int]:
try:
delete_repo(token=cls._token , repo_id='test-feature-extractor' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='valid_org/test-feature-extractor-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='test-dynamic-feature-extractor' )
except HTTPError:
pass
def __UpperCAmelCase ( self ) -> Optional[Any]:
UpperCAmelCase_ : Any = WavaVecaFeatureExtractor.from_pretrained(_UpperCamelCase )
feature_extractor.push_to_hub('test-feature-extractor' , use_auth_token=self._token )
UpperCAmelCase_ : List[Any] = WavaVecaFeatureExtractor.from_pretrained(f"{USER}/test-feature-extractor" )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(_UpperCamelCase , getattr(_UpperCamelCase , _UpperCamelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id='test-feature-extractor' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
_UpperCamelCase , repo_id='test-feature-extractor' , push_to_hub=_UpperCamelCase , use_auth_token=self._token )
UpperCAmelCase_ : Optional[Any] = WavaVecaFeatureExtractor.from_pretrained(f"{USER}/test-feature-extractor" )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(_UpperCamelCase , getattr(_UpperCamelCase , _UpperCamelCase ) )
def __UpperCAmelCase ( self ) -> Optional[int]:
UpperCAmelCase_ : Union[str, Any] = WavaVecaFeatureExtractor.from_pretrained(_UpperCamelCase )
feature_extractor.push_to_hub('valid_org/test-feature-extractor' , use_auth_token=self._token )
UpperCAmelCase_ : Dict = WavaVecaFeatureExtractor.from_pretrained('valid_org/test-feature-extractor' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(_UpperCamelCase , getattr(_UpperCamelCase , _UpperCamelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id='valid_org/test-feature-extractor' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
_UpperCamelCase , repo_id='valid_org/test-feature-extractor-org' , push_to_hub=_UpperCamelCase , use_auth_token=self._token )
UpperCAmelCase_ : List[str] = WavaVecaFeatureExtractor.from_pretrained('valid_org/test-feature-extractor-org' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(_UpperCamelCase , getattr(_UpperCamelCase , _UpperCamelCase ) )
def __UpperCAmelCase ( self ) -> int:
CustomFeatureExtractor.register_for_auto_class()
UpperCAmelCase_ : Optional[int] = CustomFeatureExtractor.from_pretrained(_UpperCamelCase )
feature_extractor.push_to_hub('test-dynamic-feature-extractor' , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
feature_extractor.auto_map , {'AutoFeatureExtractor': 'custom_feature_extraction.CustomFeatureExtractor'} , )
UpperCAmelCase_ : List[Any] = AutoFeatureExtractor.from_pretrained(
f"{USER}/test-dynamic-feature-extractor" , trust_remote_code=_UpperCamelCase )
# Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
self.assertEqual(new_feature_extractor.__class__.__name__ , 'CustomFeatureExtractor' )
| 29 |
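# A hedged sketch of the push/reload cycle tested above, using the real class
# name Wav2Vec2FeatureExtractor (spelled WavaVecaFeatureExtractor in the
# obfuscated test). It assumes you are logged in via `huggingface-cli login`,
# and both repo names are placeholders.
from transformers import Wav2Vec2FeatureExtractor

extractor = Wav2Vec2FeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2")
extractor.push_to_hub("my-test-feature-extractor")  # creates <user>/my-test-feature-extractor
reloaded = Wav2Vec2FeatureExtractor.from_pretrained("your-username/my-test-feature-extractor")
assert reloaded.to_dict() == extractor.to_dict()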
def lowercase__ ( __snake_case : Dict ):
'''simple docstring'''
if not head:
return True
# split the list into two parts
UpperCAmelCase_ , UpperCAmelCase_ : Any = head.next, head
while fast and fast.next:
UpperCAmelCase_ : str = fast.next.next
UpperCAmelCase_ : Union[str, Any] = slow.next
UpperCAmelCase_ : int = slow.next
UpperCAmelCase_ : List[Any] = None # terminate the first half (the comparison below works without this, but it keeps the split clean)
# reverse the second part
UpperCAmelCase_ : Tuple = None
while second:
UpperCAmelCase_ : int = second.next
UpperCAmelCase_ : Any = node
UpperCAmelCase_ : Optional[Any] = second
UpperCAmelCase_ : Tuple = nxt
# compare two parts
# second part has the same or one less node
while node:
if node.val != head.val:
return False
UpperCAmelCase_ : Optional[Any] = node.next
UpperCAmelCase_ : Dict = head.next
return True
def lowercase__ ( __snake_case : Union[str, Any] ):
'''simple docstring'''
if not head or not head.next:
return True
# 1. Get the midpoint (slow)
UpperCAmelCase_ : Any = head
while fast and fast.next:
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = fast.next.next, slow.next
# 2. Push the second half into the stack
UpperCAmelCase_ : List[str] = [slow.val]
while slow.next:
UpperCAmelCase_ : List[str] = slow.next
stack.append(slow.val )
# 3. Comparison
while stack:
if stack.pop() != cur.val:
return False
UpperCAmelCase_ : int = cur.next
return True
def lowercase__ ( __snake_case : Dict ):
'''simple docstring'''
if not head or not head.next:
return True
UpperCAmelCase_ : Tuple = {}
UpperCAmelCase_ : int = 0
while head:
if head.val in d:
d[head.val].append(__snake_case )
else:
UpperCAmelCase_ : List[Any] = [pos]
UpperCAmelCase_ : Any = head.next
pos += 1
UpperCAmelCase_ : Dict = pos - 1
UpperCAmelCase_ : Optional[int] = 0
for v in d.values():
if len(__snake_case ) % 2 != 0:
middle += 1
else:
UpperCAmelCase_ : int = 0
for i in range(0 , len(__snake_case ) ):
if v[i] + v[len(__snake_case ) - 1 - step] != checksum:
return False
step += 1
if middle > 1:
return False
return True
| 29 | 1 |
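# A runnable sketch of the slow/fast-pointer plus reverse approach above, with
# explicit names in place of the obfuscated identifiers; the ListNode class is
# assumed, as in the usual LeetCode setting.
class ListNode:
    def __init__(self, val=0, nxt=None):
        self.val, self.next = val, nxt

def is_palindrome(head: ListNode) -> bool:
    if not head or not head.next:
        return True
    slow = fast = head
    while fast and fast.next:  # slow stops at the midpoint
        slow, fast = slow.next, fast.next.next
    prev = None
    while slow:  # reverse the second half in place
        slow.next, prev, slow = prev, slow, slow.next
    while prev:  # compare the two halves value by value
        if prev.val != head.val:
            return False
        prev, head = prev.next, head.next
    return True

nodes = None
for v in [1, 2, 3, 2, 1]:
    nodes = ListNode(v, nodes)
assert is_palindrome(nodes)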
def lowercase__ ( __snake_case : list ):
'''simple docstring'''
for i in range(len(__snake_case ) - 1 , 0 , -1 ):
UpperCAmelCase_ : Dict = False
for j in range(__snake_case , 0 , -1 ):
if unsorted[j] < unsorted[j - 1]:
UpperCAmelCase_ , UpperCAmelCase_ : Any = unsorted[j - 1], unsorted[j]
UpperCAmelCase_ : int = True
for j in range(__snake_case ):
if unsorted[j] > unsorted[j + 1]:
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = unsorted[j + 1], unsorted[j]
UpperCAmelCase_ : Any = True
if not swapped:
break
return unsorted
if __name__ == "__main__":
import doctest
doctest.testmod()
__UpperCAmelCase = input('Enter numbers separated by a comma:\n').strip()
__UpperCAmelCase = [int(item) for item in user_input.split(',')]
print(F'{cocktail_shaker_sort(unsorted) = }')
| 29 |
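# A clean, runnable rendering of the bidirectional pass above: the obfuscated
# range(__snake_case, ...) placeholders stand for range(i, ...) in each sweep.
def cocktail_shaker_sort(unsorted: list) -> list:
    for i in range(len(unsorted) - 1, 0, -1):
        swapped = False
        for j in range(i, 0, -1):  # right-to-left pass bubbles small values down
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j], unsorted[j - 1] = unsorted[j - 1], unsorted[j]
                swapped = True
        for j in range(i):  # left-to-right pass bubbles large values up
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j], unsorted[j + 1] = unsorted[j + 1], unsorted[j]
                swapped = True
        if not swapped:
            break
    return unsorted

assert cocktail_shaker_sort([4, 5, 2, 1, 2]) == [1, 2, 2, 4, 5]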
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__UpperCAmelCase = {'configuration_vit_msn': ['VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTMSNConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST',
'ViTMSNModel',
'ViTMSNForImageClassification',
'ViTMSNPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 29 | 1 |
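# A hedged sketch of what the _LazyModule wiring above buys: importing the
# package module is cheap, and the torch-backed modeling file is only imported
# on first attribute access. The sys.modules check reflects my reading of
# _LazyModule's behavior and needs torch installed to run.
import importlib
import sys

mod = importlib.import_module("transformers.models.vit_msn")
assert "transformers.models.vit_msn.modeling_vit_msn" not in sys.modules
_ = mod.ViTMSNModel  # first access triggers the real import
assert "transformers.models.vit_msn.modeling_vit_msn" in sys.modules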
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowerCamelCase (unittest.TestCase ):
'''simple docstring'''
def __init__( self , _UpperCamelCase , _UpperCamelCase=3 , _UpperCamelCase=3_2 , _UpperCamelCase=3 , _UpperCamelCase=1_0 , _UpperCamelCase=[1_0, 2_0, 3_0, 4_0] , _UpperCamelCase=[1, 1, 2, 1] , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase="relu" , _UpperCamelCase=3 , _UpperCamelCase=None , ) -> Union[str, Any]:
UpperCAmelCase_ : Any = parent
UpperCAmelCase_ : List[str] = batch_size
UpperCAmelCase_ : Optional[int] = image_size
UpperCAmelCase_ : Union[str, Any] = num_channels
UpperCAmelCase_ : int = embeddings_size
UpperCAmelCase_ : Optional[int] = hidden_sizes
UpperCAmelCase_ : Optional[int] = depths
UpperCAmelCase_ : Optional[Any] = is_training
UpperCAmelCase_ : Tuple = use_labels
UpperCAmelCase_ : List[str] = hidden_act
UpperCAmelCase_ : List[Any] = num_labels
UpperCAmelCase_ : Optional[Any] = scope
UpperCAmelCase_ : Any = len(_UpperCamelCase )
def __UpperCAmelCase ( self ) -> List[Any]:
UpperCAmelCase_ : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase_ : str = self.get_config()
return config, pixel_values
def __UpperCAmelCase ( self ) -> Optional[int]:
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase ) -> Dict:
UpperCAmelCase_ : Any = FlaxRegNetModel(config=_UpperCamelCase )
UpperCAmelCase_ : Union[str, Any] = model(_UpperCamelCase )
# Output shape (b, c, h, w)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) , )
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase ) -> List[Any]:
UpperCAmelCase_ : Union[str, Any] = self.num_labels
UpperCAmelCase_ : List[Any] = FlaxRegNetForImageClassification(config=_UpperCamelCase )
UpperCAmelCase_ : Union[str, Any] = model(_UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __UpperCAmelCase ( self ) -> Optional[Any]:
UpperCAmelCase_ : str = self.prepare_config_and_inputs()
UpperCAmelCase_ , UpperCAmelCase_ : Any = config_and_inputs
UpperCAmelCase_ : Optional[Any] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_flax
class lowerCamelCase (_snake_case , unittest.TestCase ):
'''simple docstring'''
_snake_case : List[str] = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()
_snake_case : int = False
_snake_case : Optional[Any] = False
_snake_case : Optional[int] = False
def __UpperCAmelCase ( self ) -> None:
UpperCAmelCase_ : List[Any] = FlaxRegNetModelTester(self )
UpperCAmelCase_ : Dict = ConfigTester(self , config_class=_UpperCamelCase , has_text_modality=_UpperCamelCase )
def __UpperCAmelCase ( self ) -> Dict:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __UpperCAmelCase ( self ) -> str:
return
def __UpperCAmelCase ( self ) -> Any:
UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCamelCase )
def __UpperCAmelCase ( self ) -> Optional[int]:
UpperCAmelCase_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_UpperCamelCase )
@unittest.skip(reason='RegNet does not use inputs_embeds' )
def __UpperCAmelCase ( self ) -> str:
pass
@unittest.skip(reason='RegNet does not support input and output embeddings' )
def __UpperCAmelCase ( self ) -> Optional[Any]:
pass
def __UpperCAmelCase ( self ) -> Optional[Any]:
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ : Tuple = model_class(_UpperCamelCase )
UpperCAmelCase_ : Any = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_ : Dict = [*signature.parameters.keys()]
UpperCAmelCase_ : List[str] = ['pixel_values']
self.assertListEqual(arg_names[:1] , _UpperCamelCase )
def __UpperCAmelCase ( self ) -> str:
def check_hidden_states_output(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
UpperCAmelCase_ : Optional[Any] = model_class(_UpperCamelCase )
UpperCAmelCase_ : str = model(**self._prepare_for_class(_UpperCamelCase , _UpperCamelCase ) )
UpperCAmelCase_ : Optional[int] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
UpperCAmelCase_ : Tuple = self.model_tester.num_stages
self.assertEqual(len(_UpperCamelCase ) , expected_num_stages + 1 )
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ : Tuple = True
check_hidden_states_output(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# check that output_hidden_states also works using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase_ : str = True
check_hidden_states_output(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
def __UpperCAmelCase ( self ) -> Tuple:
UpperCAmelCase_ , UpperCAmelCase_ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
UpperCAmelCase_ : Dict = self._prepare_for_class(_UpperCamelCase , _UpperCamelCase )
UpperCAmelCase_ : Dict = model_class(_UpperCamelCase )
@jax.jit
def model_jitted(_UpperCamelCase , **_UpperCamelCase ):
return model(pixel_values=_UpperCamelCase , **_UpperCamelCase )
with self.subTest('JIT Enabled' ):
UpperCAmelCase_ : Optional[int] = model_jitted(**_UpperCamelCase ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
UpperCAmelCase_ : int = model_jitted(**_UpperCamelCase ).to_tuple()
self.assertEqual(len(_UpperCamelCase ) , len(_UpperCamelCase ) )
for jitted_output, output in zip(_UpperCamelCase , _UpperCamelCase ):
self.assertEqual(jitted_output.shape , output.shape )
def lowercase__ ( ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_flax
class lowerCamelCase (unittest.TestCase ):
'''simple docstring'''
@cached_property
def __UpperCAmelCase ( self ) -> Optional[Any]:
return AutoImageProcessor.from_pretrained('facebook/regnet-y-040' ) if is_vision_available() else None
@slow
def __UpperCAmelCase ( self ) -> int:
UpperCAmelCase_ : Tuple = FlaxRegNetForImageClassification.from_pretrained('facebook/regnet-y-040' )
UpperCAmelCase_ : Tuple = self.default_image_processor
UpperCAmelCase_ : List[Any] = prepare_img()
UpperCAmelCase_ : Any = image_processor(images=_UpperCamelCase , return_tensors='np' )
UpperCAmelCase_ : int = model(**_UpperCamelCase )
# verify the logits
UpperCAmelCase_ : Union[str, Any] = (1, 1_0_0_0)
self.assertEqual(outputs.logits.shape , _UpperCamelCase )
UpperCAmelCase_ : Tuple = jnp.array([-0.41_80, -1.50_51, -3.48_36] )
self.assertTrue(jnp.allclose(outputs.logits[0, :3] , _UpperCamelCase , atol=1E-4 ) )
| 29 |
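# The JIT-vs-eager check above generalizes to any pure function: compile once
# with jax.jit and confirm the traced and eager paths agree. A toy stand-in
# (not the RegNet model itself) keeps the sketch self-contained:
import jax
import jax.numpy as jnp

def forward(x):
    return jnp.tanh(x @ x.T)

x = jnp.ones((4, 4))
jitted = jax.jit(forward)
assert jitted(x).shape == forward(x).shape
assert jnp.allclose(jitted(x), forward(x), atol=1e-6)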
__UpperCAmelCase = {
'Pillow': 'Pillow<10.0.0',
'accelerate': 'accelerate>=0.20.3',
'av': 'av==9.2.0',
'beautifulsoup4': 'beautifulsoup4',
'black': 'black~=23.1',
'codecarbon': 'codecarbon==1.2.0',
'cookiecutter': 'cookiecutter==1.7.3',
'dataclasses': 'dataclasses',
'datasets': 'datasets!=2.5.0',
'decord': 'decord==0.6.0',
'deepspeed': 'deepspeed>=0.9.3',
'diffusers': 'diffusers',
'dill': 'dill<0.3.5',
'evaluate': 'evaluate>=0.2.0',
'fairscale': 'fairscale>0.3',
'faiss-cpu': 'faiss-cpu',
'fastapi': 'fastapi',
'filelock': 'filelock',
'flax': 'flax>=0.4.1,<=0.7.0',
'ftfy': 'ftfy',
'fugashi': 'fugashi>=1.0',
'GitPython': 'GitPython<3.1.19',
'hf-doc-builder': 'hf-doc-builder>=0.3.0',
'huggingface-hub': 'huggingface-hub>=0.14.1,<1.0',
'importlib_metadata': 'importlib_metadata',
'ipadic': 'ipadic>=1.0.0,<2.0',
'isort': 'isort>=5.5.4',
'jax': 'jax>=0.2.8,!=0.3.2,<=0.4.13',
'jaxlib': 'jaxlib>=0.1.65,<=0.4.13',
'jieba': 'jieba',
'kenlm': 'kenlm',
'keras-nlp': 'keras-nlp>=0.3.1',
'librosa': 'librosa',
'nltk': 'nltk',
'natten': 'natten>=0.14.6',
'numpy': 'numpy>=1.17',
'onnxconverter-common': 'onnxconverter-common',
'onnxruntime-tools': 'onnxruntime-tools>=1.4.2',
'onnxruntime': 'onnxruntime>=1.4.0',
'opencv-python': 'opencv-python',
'optuna': 'optuna',
'optax': 'optax>=0.0.8,<=0.1.4',
'packaging': 'packaging>=20.0',
'parameterized': 'parameterized',
'phonemizer': 'phonemizer',
'protobuf': 'protobuf',
'psutil': 'psutil',
'pyyaml': 'pyyaml>=5.1',
'pydantic': 'pydantic<2',
'pytest': 'pytest>=7.2.0',
'pytest-timeout': 'pytest-timeout',
'pytest-xdist': 'pytest-xdist',
'python': 'python>=3.8.0',
'ray[tune]': 'ray[tune]',
'regex': 'regex!=2019.12.17',
'requests': 'requests',
'rhoknp': 'rhoknp>=1.1.0,<1.3.1',
'rjieba': 'rjieba',
'rouge-score': 'rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1',
'ruff': 'ruff>=0.0.241,<=0.0.259',
'sacrebleu': 'sacrebleu>=1.4.12,<2.0.0',
'sacremoses': 'sacremoses',
'safetensors': 'safetensors>=0.3.1',
'sagemaker': 'sagemaker>=2.31.0',
'scikit-learn': 'scikit-learn',
'sentencepiece': 'sentencepiece>=0.1.91,!=0.1.92',
'sigopt': 'sigopt',
'starlette': 'starlette',
'sudachipy': 'sudachipy>=0.6.6',
'sudachidict_core': 'sudachidict_core>=20220729',
'tensorflow-cpu': 'tensorflow-cpu>=2.6,<2.14',
'tensorflow': 'tensorflow>=2.6,<2.14',
'tensorflow-text': 'tensorflow-text<2.14',
'tf2onnx': 'tf2onnx',
'timeout-decorator': 'timeout-decorator',
'timm': 'timm',
'tokenizers': 'tokenizers>=0.11.1,!=0.11.3,<0.14',
'torch': 'torch>=1.9,!=1.12.0',
'torchaudio': 'torchaudio',
'torchvision': 'torchvision',
'pyctcdecode': 'pyctcdecode>=0.4.0',
'tqdm': 'tqdm>=4.27',
'unidic': 'unidic>=1.0.2',
'unidic_lite': 'unidic_lite>=1.0.7',
'urllib3': 'urllib3<2.0.0',
'uvicorn': 'uvicorn',
}
| 29 | 1 |
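# Sketch of how a version table like the one above is typically consumed:
# turning soft package names into pinned requirement strings for setup()
# extras. The `deps` binding and helper name are illustrative assumptions.
deps = {"numpy": "numpy>=1.17", "tqdm": "tqdm>=4.27"}  # excerpt of the table above

def deps_list(*pkgs: str) -> list:
    return [deps[pkg] for pkg in pkgs]

assert deps_list("numpy", "tqdm") == ["numpy>=1.17", "tqdm>=4.27"]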
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class lowerCamelCase :
'''simple docstring'''
def __init__( self , _UpperCamelCase , _UpperCamelCase=1_3 , _UpperCamelCase=7 , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase=9_9 , _UpperCamelCase=6_4 , _UpperCamelCase=3_2 , _UpperCamelCase=5 , _UpperCamelCase=4 , _UpperCamelCase=3_7 , _UpperCamelCase="gelu" , _UpperCamelCase=0.1 , _UpperCamelCase=0.1 , _UpperCamelCase=5_1_2 , _UpperCamelCase=1_6 , _UpperCamelCase=2 , _UpperCamelCase=0.02 , _UpperCamelCase=3 , _UpperCamelCase=4 , _UpperCamelCase=None , ) -> Tuple:
UpperCAmelCase_ : Union[str, Any] = parent
UpperCAmelCase_ : Tuple = batch_size
UpperCAmelCase_ : Dict = seq_length
UpperCAmelCase_ : str = is_training
UpperCAmelCase_ : str = use_input_mask
UpperCAmelCase_ : Tuple = use_token_type_ids
UpperCAmelCase_ : Optional[Any] = use_labels
UpperCAmelCase_ : List[Any] = vocab_size
UpperCAmelCase_ : Any = hidden_size
UpperCAmelCase_ : Optional[int] = embedding_size
UpperCAmelCase_ : Dict = num_hidden_layers
UpperCAmelCase_ : Optional[Any] = num_attention_heads
UpperCAmelCase_ : Any = intermediate_size
UpperCAmelCase_ : List[Any] = hidden_act
UpperCAmelCase_ : Dict = hidden_dropout_prob
UpperCAmelCase_ : Optional[int] = attention_probs_dropout_prob
UpperCAmelCase_ : Tuple = max_position_embeddings
UpperCAmelCase_ : int = type_vocab_size
UpperCAmelCase_ : Optional[int] = type_sequence_label_size
UpperCAmelCase_ : Tuple = initializer_range
UpperCAmelCase_ : List[Any] = num_labels
UpperCAmelCase_ : str = num_choices
UpperCAmelCase_ : Optional[Any] = scope
def __UpperCAmelCase ( self ) -> Optional[int]:
UpperCAmelCase_ : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase_ : Any = None
if self.use_input_mask:
UpperCAmelCase_ : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase_ : Tuple = None
if self.use_token_type_ids:
UpperCAmelCase_ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCAmelCase_ : Any = None
UpperCAmelCase_ : Any = None
UpperCAmelCase_ : Any = None
if self.use_labels:
UpperCAmelCase_ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase_ : int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase_ : Any = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase_ : List[str] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __UpperCAmelCase ( self ) -> Optional[Any]:
return MegatronBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_UpperCamelCase , initializer_range=self.initializer_range , )
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Dict:
UpperCAmelCase_ : Dict = MegatronBertModel(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
UpperCAmelCase_ : Dict = model(_UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase )
UpperCAmelCase_ : Optional[int] = model(_UpperCamelCase , token_type_ids=_UpperCamelCase )
UpperCAmelCase_ : Tuple = model(_UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Optional[Any]:
UpperCAmelCase_ : List[Any] = MegatronBertForMaskedLM(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
UpperCAmelCase_ : Tuple = model(_UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase , labels=_UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Optional[int]:
UpperCAmelCase_ : Any = MegatronBertForCausalLM(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
UpperCAmelCase_ : List[Any] = model(_UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase , labels=_UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> str:
UpperCAmelCase_ : Optional[int] = MegatronBertForNextSentencePrediction(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
UpperCAmelCase_ : Optional[Any] = model(
_UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase , labels=_UpperCamelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Optional[Any]:
UpperCAmelCase_ : Optional[int] = MegatronBertForPreTraining(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
UpperCAmelCase_ : List[str] = model(
_UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase , labels=_UpperCamelCase , next_sentence_label=_UpperCamelCase , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Any:
UpperCAmelCase_ : Optional[int] = MegatronBertForQuestionAnswering(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
UpperCAmelCase_ : Dict = model(
_UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase , start_positions=_UpperCamelCase , end_positions=_UpperCamelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> List[Any]:
UpperCAmelCase_ : Union[str, Any] = self.num_labels
UpperCAmelCase_ : List[Any] = MegatronBertForSequenceClassification(_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
UpperCAmelCase_ : Any = model(_UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase , labels=_UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> str:
UpperCAmelCase_ : Union[str, Any] = self.num_labels
UpperCAmelCase_ : Optional[Any] = MegatronBertForTokenClassification(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
UpperCAmelCase_ : Optional[int] = model(_UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase , labels=_UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Dict:
UpperCAmelCase_ : Optional[int] = self.num_choices
UpperCAmelCase_ : str = MegatronBertForMultipleChoice(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
UpperCAmelCase_ : Optional[Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase_ : List[str] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase_ : List[str] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase_ : Tuple = model(
_UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase , labels=_UpperCamelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __UpperCAmelCase ( self ) -> Union[str, Any]:
UpperCAmelCase_ : Dict = self.prepare_config_and_inputs()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = config_and_inputs
UpperCAmelCase_ : Optional[Any] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class lowerCamelCase (_snake_case , _snake_case , unittest.TestCase ):
'''simple docstring'''
_snake_case : Tuple = (
(
MegatronBertModel,
MegatronBertForMaskedLM,
MegatronBertForCausalLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
)
if is_torch_available()
else ()
)
_snake_case : str = (
{
'''feature-extraction''': MegatronBertModel,
'''fill-mask''': MegatronBertForMaskedLM,
'''question-answering''': MegatronBertForQuestionAnswering,
'''text-classification''': MegatronBertForSequenceClassification,
'''text-generation''': MegatronBertForCausalLM,
'''token-classification''': MegatronBertForTokenClassification,
'''zero-shot''': MegatronBertForSequenceClassification,
}
if is_torch_available()
else {}
)
_snake_case : Any = True
# test_resize_embeddings = False
_snake_case : List[str] = False
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=False ) -> Any:
UpperCAmelCase_ : Dict = super()._prepare_for_class(_UpperCamelCase , _UpperCamelCase , return_labels=_UpperCamelCase )
if return_labels:
if model_class in get_values(_UpperCamelCase ):
UpperCAmelCase_ : int = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=_UpperCamelCase )
UpperCAmelCase_ : int = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_UpperCamelCase )
return inputs_dict
def __UpperCAmelCase ( self ) -> Tuple:
UpperCAmelCase_ : int = MegatronBertModelTester(self )
UpperCAmelCase_ : Dict = ConfigTester(self , config_class=_UpperCamelCase , hidden_size=3_7 )
def __UpperCAmelCase ( self ) -> Optional[int]:
self.config_tester.run_common_tests()
def __UpperCAmelCase ( self ) -> Any:
UpperCAmelCase_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_model(*_UpperCamelCase )
def __UpperCAmelCase ( self ) -> str:
UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_masked_lm(*_UpperCamelCase )
def __UpperCAmelCase ( self ) -> List[str]:
UpperCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*_UpperCamelCase )
def __UpperCAmelCase ( self ) -> Optional[Any]:
UpperCAmelCase_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*_UpperCamelCase )
def __UpperCAmelCase ( self ) -> Optional[int]:
UpperCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_pretraining(*_UpperCamelCase )
def __UpperCAmelCase ( self ) -> str:
UpperCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_question_answering(*_UpperCamelCase )
def __UpperCAmelCase ( self ) -> int:
UpperCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*_UpperCamelCase )
def __UpperCAmelCase ( self ) -> Optional[int]:
UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_token_classification(*_UpperCamelCase )
def lowercase__ ( __snake_case : Optional[Any] ):
'''simple docstring'''
return torch.tensor(
__snake_case , dtype=torch.long , device=__snake_case , )
__UpperCAmelCase = 1E-4
@require_torch
@require_sentencepiece
@require_tokenizers
class lowerCamelCase (unittest.TestCase ):
'''simple docstring'''
@slow
@unittest.skip('Model is not available.' )
def __UpperCAmelCase ( self ) -> Dict:
UpperCAmelCase_ : int = 'nvidia/megatron-bert-uncased-345m'
if "MYDIR" in os.environ:
UpperCAmelCase_ : List[str] = os.path.join(os.environ['MYDIR'] , _UpperCamelCase )
UpperCAmelCase_ : Tuple = MegatronBertModel.from_pretrained(_UpperCamelCase )
model.to(_UpperCamelCase )
model.half()
UpperCAmelCase_ : Any = _long_tensor([[1_0_1, 7_1_1_0, 1_0_0_5, 1_0_5_6, 2_0_2_3, 1_1_3_3_3, 1_7_4_1_3, 1_0_2_9, 1_0_2]] )
with torch.no_grad():
UpperCAmelCase_ : str = model(_UpperCamelCase )[0]
UpperCAmelCase_ : List[Any] = torch.Size((1, 9, 1_0_2_4) )
self.assertEqual(output.shape , _UpperCamelCase )
UpperCAmelCase_ : Optional[int] = [-0.60_40, -0.25_17, -0.10_25, 0.34_20, -0.67_58, -0.00_17, -0.10_89, -0.19_90, 0.57_28]
for ii in range(3 ):
for jj in range(3 ):
UpperCAmelCase_ : Optional[int] = output[0, ii, jj]
UpperCAmelCase_ : Any = expected[3 * ii + jj]
UpperCAmelCase_ : List[Any] = 'ii={} jj={} a={} b={}'.format(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
self.assertTrue(math.isclose(_UpperCamelCase , _UpperCamelCase , rel_tol=_UpperCamelCase , abs_tol=_UpperCamelCase ) , msg=_UpperCamelCase )
| 29 |
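# A small sketch of the mapping lookup that the _prepare_for_class override
# above relies on: pretraining heads need extra labels, so the override checks
# membership via get_values(MODEL_FOR_PRETRAINING_MAPPING). That MegatronBert's
# pretraining head is registered there is my assumption about the auto mapping.
from transformers import MODEL_FOR_PRETRAINING_MAPPING, MegatronBertForPreTraining
from transformers.models.auto import get_values

assert MegatronBertForPreTraining in get_values(MODEL_FOR_PRETRAINING_MAPPING)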
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .attention_processor import AttentionProcessor, AttnProcessor
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder
@dataclass
class lowerCamelCase (_snake_case ):
'''simple docstring'''
_snake_case : "DiagonalGaussianDistribution"
class lowerCamelCase (_snake_case , _snake_case ):
'''simple docstring'''
_snake_case : Optional[int] = True
@register_to_config
def __init__( self , _UpperCamelCase = 3 , _UpperCamelCase = 3 , _UpperCamelCase = ("DownEncoderBlock2D",) , _UpperCamelCase = ("UpDecoderBlock2D",) , _UpperCamelCase = (6_4,) , _UpperCamelCase = 1 , _UpperCamelCase = "silu" , _UpperCamelCase = 4 , _UpperCamelCase = 3_2 , _UpperCamelCase = 3_2 , _UpperCamelCase = 0.1_82_15 , ) -> List[Any]:
super().__init__()
# pass init params to Encoder
UpperCAmelCase_ : List[str] = Encoder(
in_channels=_UpperCamelCase , out_channels=_UpperCamelCase , down_block_types=_UpperCamelCase , block_out_channels=_UpperCamelCase , layers_per_block=_UpperCamelCase , act_fn=_UpperCamelCase , norm_num_groups=_UpperCamelCase , double_z=_UpperCamelCase , )
# pass init params to Decoder
UpperCAmelCase_ : Dict = Decoder(
in_channels=_UpperCamelCase , out_channels=_UpperCamelCase , up_block_types=_UpperCamelCase , block_out_channels=_UpperCamelCase , layers_per_block=_UpperCamelCase , norm_num_groups=_UpperCamelCase , act_fn=_UpperCamelCase , )
UpperCAmelCase_ : Any = nn.Convad(2 * latent_channels , 2 * latent_channels , 1 )
UpperCAmelCase_ : List[Any] = nn.Convad(_UpperCamelCase , _UpperCamelCase , 1 )
UpperCAmelCase_ : Any = False
UpperCAmelCase_ : int = False
# only relevant if vae tiling is enabled
UpperCAmelCase_ : Optional[int] = self.config.sample_size
UpperCAmelCase_ : int = (
self.config.sample_size[0]
if isinstance(self.config.sample_size , (list, tuple) )
else self.config.sample_size
)
UpperCAmelCase_ : Union[str, Any] = int(sample_size / (2 ** (len(self.config.block_out_channels ) - 1)) )
UpperCAmelCase_ : Optional[Any] = 0.25
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase=False ) -> List[str]:
if isinstance(_UpperCamelCase , (Encoder, Decoder) ):
UpperCAmelCase_ : Union[str, Any] = value
def __UpperCAmelCase ( self , _UpperCamelCase = True ) -> int:
UpperCAmelCase_ : Tuple = use_tiling
def __UpperCAmelCase ( self ) -> Dict:
self.enable_tiling(_UpperCamelCase )
def __UpperCAmelCase ( self ) -> Optional[Any]:
UpperCAmelCase_ : str = True
def __UpperCAmelCase ( self ) -> List[Any]:
UpperCAmelCase_ : Optional[int] = False
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def __UpperCAmelCase ( self ) -> Dict[str, AttentionProcessor]:
UpperCAmelCase_ : Optional[int] = {}
def fn_recursive_add_processors(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
if hasattr(_UpperCamelCase , 'set_processor' ):
UpperCAmelCase_ : Optional[int] = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(f"{name}.{sub_name}" , _UpperCamelCase , _UpperCamelCase )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
return processors
def __UpperCAmelCase ( self , _UpperCamelCase ) -> List[Any]:
UpperCAmelCase_ : Union[str, Any] = len(self.attn_processors.keys() )
if isinstance(_UpperCamelCase , _UpperCamelCase ) and len(_UpperCamelCase ) != count:
raise ValueError(
f"A dict of processors was passed, but the number of processors {len(_UpperCamelCase )} does not match the"
f" number of attention layers: {count}. Please make sure to pass {count} processor classes." )
def fn_recursive_attn_processor(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
if hasattr(_UpperCamelCase , 'set_processor' ):
if not isinstance(_UpperCamelCase , _UpperCamelCase ):
module.set_processor(_UpperCamelCase )
else:
module.set_processor(processor.pop(f"{name}.processor" ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(f"{name}.{sub_name}" , _UpperCamelCase , _UpperCamelCase )
for name, module in self.named_children():
fn_recursive_attn_processor(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
def __UpperCAmelCase ( self ) -> Union[str, Any]:
self.set_attn_processor(AttnProcessor() )
@apply_forward_hook
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = True ) -> AutoencoderKLOutput:
if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
return self.tiled_encode(_UpperCamelCase , return_dict=_UpperCamelCase )
if self.use_slicing and x.shape[0] > 1:
UpperCAmelCase_ : Union[str, Any] = [self.encoder(_UpperCamelCase ) for x_slice in x.split(1 )]
UpperCAmelCase_ : Tuple = torch.cat(_UpperCamelCase )
else:
UpperCAmelCase_ : List[Any] = self.encoder(_UpperCamelCase )
UpperCAmelCase_ : Optional[Any] = self.quant_conv(_UpperCamelCase )
UpperCAmelCase_ : Tuple = DiagonalGaussianDistribution(_UpperCamelCase )
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=_UpperCamelCase )
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase = True ) -> Union[DecoderOutput, torch.FloatTensor]:
if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
return self.tiled_decode(_UpperCamelCase , return_dict=_UpperCamelCase )
UpperCAmelCase_ : str = self.post_quant_conv(_UpperCamelCase )
UpperCAmelCase_ : List[str] = self.decoder(_UpperCamelCase )
if not return_dict:
return (dec,)
return DecoderOutput(sample=_UpperCamelCase )
@apply_forward_hook
def decode(self, z, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
    if self.use_slicing and z.shape[0] > 1:
        # Decode one latent at a time to reduce peak memory usage.
        decoded_slices = [self._decode(z_slice).sample for z_slice in z.split(1)]
        decoded = torch.cat(decoded_slices)
    else:
        decoded = self._decode(z).sample

    if not return_dict:
        return (decoded,)

    return DecoderOutput(sample=decoded)
def blend_v(self, a, b, blend_extent):
    # Vertically blend the bottom edge of tile `a` into the top edge of tile `b`
    # with a linear ramp, writing the result into `b` in place.
    blend_extent = min(a.shape[2], b.shape[2], blend_extent)
    for y in range(blend_extent):
        b[:, :, y, :] = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
    return b
def blend_h(self, a, b, blend_extent):
    # Horizontally blend the right edge of tile `a` into the left edge of tile `b`
    # with a linear ramp, writing the result into `b` in place.
    blend_extent = min(a.shape[3], b.shape[3], blend_extent)
    for x in range(blend_extent):
        b[:, :, :, x] = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
    return b
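# Worked example of the blend weights used above: with blend_extent = 4, rows
# (or columns) y = 0..3 of `b` get weights 0.0, 0.25, 0.5, 0.75, while the
# matching rows of `a` get the complementary weights 1.0, 0.75, 0.5, 0.25, so
# each seam fades smoothly from `a` into `b` instead of showing a hard edge.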
def tiled_encode(self, x, return_dict: bool = True) -> AutoencoderKLOutput:
    overlap_size = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor))
    blend_extent = int(self.tile_latent_min_size * self.tile_overlap_factor)
    row_limit = self.tile_latent_min_size - blend_extent

    # Split the image into 512x512 tiles and encode them separately.
    rows = []
    for i in range(0, x.shape[2], overlap_size):
        row = []
        for j in range(0, x.shape[3], overlap_size):
            tile = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
            tile = self.encoder(tile)
            tile = self.quant_conv(tile)
            row.append(tile)
        rows.append(row)
    result_rows = []
    for i, row in enumerate(rows):
        result_row = []
        for j, tile in enumerate(row):
            # blend the above tile and the left tile
            # to the current tile and add the current tile to the result row
            if i > 0:
                tile = self.blend_v(rows[i - 1][j], tile, blend_extent)
            if j > 0:
                tile = self.blend_h(row[j - 1], tile, blend_extent)
            result_row.append(tile[:, :, :row_limit, :row_limit])
        result_rows.append(torch.cat(result_row, dim=3))

    moments = torch.cat(result_rows, dim=2)
    posterior = DiagonalGaussianDistribution(moments)

    if not return_dict:
        return (posterior,)

    return AutoencoderKLOutput(latent_dist=posterior)
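# Worked example of the tiling arithmetic above, assuming the typical defaults
# tile_sample_min_size = 512, tile_latent_min_size = 64, tile_overlap_factor = 0.25
# (these values are assumptions, not read from this section):
#
#   overlap_size = int(512 * 0.75) = 384   # stride between tile origins in pixel space
#   blend_extent = int(64 * 0.25)  = 16    # latent rows/columns blended across each seam
#   row_limit    = 64 - 16         = 48    # latent rows/columns each tile contributes
#
# A 384-pixel stride corresponds to 48 latent positions at 8x downsampling,
# which equals row_limit, so the cropped tiles line up without gaps or overlap.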
def tiled_decode(self, z, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
    overlap_size = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor))
    blend_extent = int(self.tile_sample_min_size * self.tile_overlap_factor)
    row_limit = self.tile_sample_min_size - blend_extent

    # Split z into overlapping 64x64 tiles and decode them separately.
    # The tiles have an overlap to avoid seams between tiles.
    rows = []
    for i in range(0, z.shape[2], overlap_size):
        row = []
        for j in range(0, z.shape[3], overlap_size):
            tile = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
            tile = self.post_quant_conv(tile)
            decoded = self.decoder(tile)
            row.append(decoded)
        rows.append(row)
    result_rows = []
    for i, row in enumerate(rows):
        result_row = []
        for j, tile in enumerate(row):
            # blend the above tile and the left tile
            # to the current tile and add the current tile to the result row
            if i > 0:
                tile = self.blend_v(rows[i - 1][j], tile, blend_extent)
            if j > 0:
                tile = self.blend_h(row[j - 1], tile, blend_extent)
            result_row.append(tile[:, :, :row_limit, :row_limit])
        result_rows.append(torch.cat(result_row, dim=3))

    dec = torch.cat(result_rows, dim=2)

    if not return_dict:
        return (dec,)

    return DecoderOutput(sample=dec)
def forward(self, sample, sample_posterior: bool = False, return_dict: bool = True, generator=None) -> Union[DecoderOutput, torch.FloatTensor]:
    # Full autoencoder pass: encode to a posterior distribution, pick a latent
    # (a random sample or the distribution mode), then decode it back.
    x = sample
    posterior = self.encode(x).latent_dist

    if sample_posterior:
        z = posterior.sample(generator=generator)
    else:
        z = posterior.mode()

    dec = self.decode(z).sample

    if not return_dict:
        return (dec,)

    return DecoderOutput(sample=dec)
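# End-to-end usage sketch. This is hedged: `AutoencoderKL.from_pretrained`, the
# checkpoint name, and `enable_tiling` follow the usual diffusers conventions
# and refer to code outside this section.
#
#   vae = AutoencoderKL.from_pretrained('stabilityai/sd-vae-ft-mse')
#   vae.enable_tiling()                     # route large inputs through tiled_encode/tiled_decode
#   x = torch.randn(1, 3, 1024, 1024)
#   reconstruction = vae(x, sample_posterior=True).sample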