from math import factorial


def combinations(n: int, k: int) -> int:
    """Return C(n, k), the number of ways to choose k items from n items."""
    if n < k or k < 0:
        raise ValueError("Please enter positive integers for n and k where n >= k")
    return factorial(n) // (factorial(k) * factorial(n - k))


if __name__ == "__main__":
    print(
        "The number of five-card hands possible from a standard",
        f"fifty-two card deck is: {combinations(52, 5)}\n",
    )
    print(
        "If a class of 40 students must be arranged into groups of",
        f"4 for group projects, there are {combinations(40, 4)} ways",
        "to arrange them.\n",
    )
    print(
        "If 10 teams are competing in a Formula One race, there",
        f"are {combinations(10, 3)} ways that first, second and",
        "third place can be awarded.",
    )
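# Quick self-checks appended as a sketch; the expected values are standard
# combinatorial facts (e.g. there are 2,598,960 five-card hands in a 52-card deck).
if __name__ == "__main__":
    assert combinations(52, 5) == 2_598_960
    assert combinations(10, 3) == combinations(10, 7)  # symmetry: C(n, k) == C(n, n - k)
    assert combinations(5, 0) == combinations(5, 5) == 1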
# Alphabet size used by the polynomial rolling hash
alphabet_size = 256
# Modulus to hash a string
modulus = 1_000_003


def rabin_karp(pattern: str, text: str) -> bool:
    """Return True if ``pattern`` occurs as a substring of ``text``."""
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False

    p_hash = 0
    text_hash = 0
    modulus_power = 1

    # Calculating the hash of pattern and substring of text
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus

    for i in range(t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size
            + ord(text[i + p_len])
        ) % modulus
    return False


def test_rabin_karp() -> None:
    # Test 1)
    pattern = "abc1abc12"
    text_1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text_2 = "alskfjaldsk23adsfabcabc"
    assert rabin_karp(pattern, text_1) and not rabin_karp(pattern, text_2)
    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert rabin_karp(pattern, text)
    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert rabin_karp(pattern, text)
    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert rabin_karp(pattern, text)
    # Test 5)
    pattern = "Lü"
    text = "Lüsai"
    assert rabin_karp(pattern, text)
    pattern = "Lue"
    assert not rabin_karp(pattern, text)
    print("Success.")


if __name__ == "__main__":
    test_rabin_karp()
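# As a lightweight oracle, Rabin-Karp should always agree with Python's
# built-in substring operator; a few extra spot checks (a sketch, not part of
# the original test suite):
if __name__ == "__main__":
    for pat, txt in [("abc", "xxabcxx"), ("zzz", "xyz"), ("aaa", "aaaa")]:
        assert rabin_karp(pat, txt) == (pat in txt)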
import os

import pytest
import yaml

from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict


@pytest.mark.parametrize(
    "files",
    [
        ["full:README.md", "dataset_infos.json"],
        ["empty:README.md", "dataset_infos.json"],
        ["dataset_infos.json"],
        ["full:README.md"],
    ],
)
def test_from_dir(files, tmp_path_factory):
    dataset_infos_dir = tmp_path_factory.mktemp("dset_infos_dir")
    if "full:README.md" in files:
        with open(dataset_infos_dir / "README.md", "w") as f:
            f.write("---\ndataset_info:\n  dataset_size: 42\n---")
    if "empty:README.md" in files:
        with open(dataset_infos_dir / "README.md", "w") as f:
            f.write("")
    # we want to support dataset_infos.json for backward compatibility
    if "dataset_infos.json" in files:
        with open(dataset_infos_dir / "dataset_infos.json", "w") as f:
            f.write('{"default": {"dataset_size": 42}}')
    dataset_infos = DatasetInfosDict.from_directory(dataset_infos_dir)
    assert dataset_infos
    assert dataset_infos["default"].dataset_size == 42


@pytest.mark.parametrize(
    "dataset_info",
    [
        DatasetInfo(),
        DatasetInfo(
            description="foo",
            features=Features({"a": Value("int32")}),
            builder_name="builder",
            config_name="config",
            version="1.0.0",
            splits=[{"name": "train"}],
            download_size=42,
        ),
    ],
)
def test_dataset_info_dump_and_reload(dataset_info: DatasetInfo, tmp_path):
    tmp_path = str(tmp_path)
    dataset_info.write_to_directory(tmp_path)
    reloaded = DatasetInfo.from_directory(tmp_path)
    assert dataset_info == reloaded
    assert os.path.exists(os.path.join(tmp_path, "dataset_info.json"))


def test_dataset_info_to_yaml_dict():
    dataset_info = DatasetInfo(
        description="foo",
        citation="bar",
        homepage="https://foo.bar",
        license="CC0",
        features=Features({"a": Value("int32")}),
        post_processed={},
        supervised_keys=(),
        task_templates=[],
        builder_name="builder",
        config_name="config",
        version="1.0.0",
        splits=[{"name": "train", "num_examples": 42}],
        download_checksums={},
        download_size=1337,
        post_processing_size=442,
        dataset_size=1234,
        size_in_bytes=1337 + 442 + 1234,
    )
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert sorted(dataset_info_yaml_dict) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML)
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        assert key in dataset_info_yaml_dict
        assert isinstance(dataset_info_yaml_dict[key], (list, dict, int, str))
    dataset_info_yaml = yaml.safe_dump(dataset_info_yaml_dict)
    reloaded = yaml.safe_load(dataset_info_yaml)
    assert dataset_info_yaml_dict == reloaded


def test_dataset_info_to_yaml_dict_empty():
    dataset_info = DatasetInfo()
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert dataset_info_yaml_dict == {}


@pytest.mark.parametrize(
    "dataset_infos_dict",
    [
        DatasetInfosDict(),
        DatasetInfosDict({"default": DatasetInfo()}),
        DatasetInfosDict({"my_config_name": DatasetInfo()}),
        DatasetInfosDict(
            {
                "default": DatasetInfo(
                    description="foo",
                    features=Features({"a": Value("int32")}),
                    builder_name="builder",
                    config_name="config",
                    version="1.0.0",
                    splits=[{"name": "train"}],
                    download_size=42,
                )
            }
        ),
        DatasetInfosDict(
            {
                "v1": DatasetInfo(dataset_size=42),
                "v2": DatasetInfo(dataset_size=1337),
            }
        ),
    ],
)
def test_dataset_infos_dict_dump_and_reload(dataset_infos_dict, tmp_path):
    tmp_path = str(tmp_path)
    dataset_infos_dict.write_to_directory(tmp_path)
    reloaded = DatasetInfosDict.from_directory(tmp_path)

    # the config_name of the dataset_infos_dict takes over the attribute
    for config_name, dataset_info in dataset_infos_dict.items():
        dataset_info.config_name = config_name
        # the yaml representation doesn't include fields like description or citation
        # so we just test that we can recover what we can from the yaml
        dataset_infos_dict[config_name] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict())
    assert dataset_infos_dict == reloaded

    if dataset_infos_dict:
        assert os.path.exists(os.path.join(tmp_path, "README.md"))
def solution(n: int = 4_000_000) -> int:
    """Return the sum of the even-valued Fibonacci terms that do not exceed n."""
    fib = [0, 1]
    i = 0
    while fib[i] <= n:
        fib.append(fib[i] + fib[i + 1])
        if fib[i + 2] > n:
            break
        i += 1
    total = 0
    for j in range(len(fib) - 1):
        if fib[j] % 2 == 0:
            total += fib[j]
    return total


if __name__ == "__main__":
    print(f"{solution() = }")
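# Known result for the default limit (added as a cross-check): the even-valued
# Fibonacci terms not exceeding four million sum to 4,613,732 (Project Euler 2).
if __name__ == "__main__":
    assert solution(4_000_000) == 4_613_732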
from typing import TYPE_CHECKING

from ...utils import _LazyModule


_import_structure = {"processing_wav2vec2_with_lm": ["Wav2Vec2ProcessorWithLM"]}

if TYPE_CHECKING:
    from .processing_wav2vec2_with_lm import Wav2Vec2ProcessorWithLM
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import os
import zipfile

import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links


def get_daily_ci_runs(token, num_runs=7):
    """Get the workflow runs of the scheduled (daily) CI, i.e. runs triggered
    by the `schedule` event on the `main` branch."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    # The id of a workflow (not of a workflow run)
    workflow_id = "636036"

    url = f"https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"
    # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
    url += f"?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"

    result = requests.get(url, headers=headers).json()

    return result["workflow_runs"]


def get_last_daily_ci_runs(token):
    """Get the id of the last completed run of the scheduled (daily) CI."""
    workflow_runs = get_daily_ci_runs(token)
    workflow_run_id = None
    for workflow_run in workflow_runs:
        if workflow_run["status"] == "completed":
            workflow_run_id = workflow_run["id"]
            break

    return workflow_run_id


def get_last_daily_ci_artifacts(artifact_names, output_dir, token):
    """Download the requested artifacts of the last completed daily CI run."""
    workflow_run_id = get_last_daily_ci_runs(token)
    if workflow_run_id is not None:
        artifacts_links = get_artifacts_links(worflow_run_id=workflow_run_id, token=token)
        for artifact_name in artifact_names:
            if artifact_name in artifacts_links:
                artifact_url = artifacts_links[artifact_name]
                download_artifact(
                    artifact_name=artifact_name, artifact_url=artifact_url, output_dir=output_dir, token=token
                )


def get_last_daily_ci_reports(artifact_names, output_dir, token):
    """Get the contents of the requested artifacts of the last completed daily CI run."""
    get_last_daily_ci_artifacts(artifact_names, output_dir, token)

    results = {}
    for artifact_name in artifact_names:
        artifact_zip_path = os.path.join(output_dir, f"{artifact_name}.zip")
        if os.path.isfile(artifact_zip_path):
            results[artifact_name] = {}
            with zipfile.ZipFile(artifact_zip_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        with z.open(filename) as f:
                            results[artifact_name][filename] = f.read().decode("UTF-8")

    return results
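# A hedged usage sketch (not in the original file): the artifact name below is
# a placeholder and the GitHub token is read from the environment; both need
# adjusting for real use, and the call performs network requests.
if __name__ == "__main__":
    os.makedirs("previous_ci", exist_ok=True)
    reports = get_last_daily_ci_reports(
        artifact_names=["ci_results"], output_dir="previous_ci", token=os.environ.get("GITHUB_TOKEN")
    )
    print(sorted(reports))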
def solution(limit: int = 50_000_000) -> int:
    """Count the numbers below ``limit`` expressible as p**2 + q**3 + r**4
    with p, q and r prime."""
    ret = set()
    prime_square_limit = int((limit - 24) ** (1 / 2))

    # Sieve of Eratosthenes over the odd numbers up to sqrt(limit - 24)
    primes = set(range(3, prime_square_limit + 1, 2))
    primes.add(2)
    for p in range(3, prime_square_limit + 1, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, prime_square_limit + 1, p)))

    for prime_1 in primes:
        square = prime_1 * prime_1
        for prime_2 in primes:
            cube = prime_2 * prime_2 * prime_2
            # 16 == 2**4 is the smallest possible fourth-power contribution
            if square + cube >= limit - 16:
                break
            for prime_3 in primes:
                tetr = prime_3 * prime_3 * prime_3 * prime_3
                total = square + cube + tetr
                if total >= limit:
                    break
                ret.add(total)

    return len(ret)


if __name__ == "__main__":
    print(f"{solution() = }")
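# Cross-check (added): the widely published answer for the default
# fifty-million limit is 1_097_343; uncomment to verify (the run takes a while).
# assert solution() == 1_097_343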
from sympy import diff, lambdify, symbols
from sympy.functions import *  # noqa: F403


def newton_raphson(
    function: str,
    starting_point: complex,
    variable: str = "x",
    precision: float = 10**-10,
    multiplicity: int = 1,
) -> complex:
    """Find the root of ``function`` closest to ``starting_point`` using the
    Newton-Raphson update x_next = x - multiplicity * f(x) / f'(x)."""
    x = symbols(variable)
    func = lambdify(x, function)
    diff_function = lambdify(x, diff(function, x))

    prev_guess = starting_point

    while True:
        if diff_function(prev_guess) != 0:
            next_guess = prev_guess - multiplicity * func(prev_guess) / diff_function(prev_guess)
        else:
            raise ZeroDivisionError("Could not find root") from None

        # Precision is checked by comparing the difference of consecutive guesses
        if abs(next_guess - prev_guess) < precision:
            return next_guess

        prev_guess = next_guess


# Let's Execute
if __name__ == "__main__":
    # Find root of trigonometric function
    # Find value of pi
    print(f"The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}")

    # Find root of polynomial
    # Find fourth Root of 5
    print(f"The root of x**4 - 5 = 0 is {newton_raphson('x**4 -5', 0.4 + 5j)}")

    # Find value of e
    print(
        "The root of log(y) - 1 = 0 is ",
        f"{newton_raphson('log(y) - 1', 2, variable='y')}",
    )

    # Exponential Roots
    print(
        "The root of exp(x) - 1 = 0 is",
        f"{newton_raphson('exp(x) - 1', 10, precision=0.005)}",
    )

    # Find root of cos(x)
    print(f"The root of cos(x) = 0 is {newton_raphson('cos(x)', 0)}")
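# For intuition, the same update rule written out without sympy, for
# f(x) = x**2 - 2 (a minimal sketch that converges to sqrt(2)):
def _newton_sqrt_two(iterations: int = 20) -> float:
    x = 1.0
    for _ in range(iterations):
        x -= (x * x - 2) / (2 * x)  # f(x) = x**2 - 2, f'(x) = 2 * x
    return x


if __name__ == "__main__":
    assert abs(_newton_sqrt_two() - 2**0.5) < 1e-12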
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__lowerCamelCase = logging.get_logger(__name__)
__lowerCamelCase = "▁"
__lowerCamelCase = {"vocab_file": "sentencepiece.bpe.model"}
__lowerCamelCase = {
"vocab_file": {
"facebook/xglm-564M": "https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model",
}
}
__lowerCamelCase = {
"facebook/xglm-564M": 20_48,
}
class UpperCamelCase__( __A ):
lowerCAmelCase__ : Tuple = VOCAB_FILES_NAMES
lowerCAmelCase__ : int = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase__ : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase__ : str = ['input_ids', 'attention_mask']
def __init__( self ,__UpperCAmelCase ,__UpperCAmelCase="<s>" ,__UpperCAmelCase="</s>" ,__UpperCAmelCase="</s>" ,__UpperCAmelCase="<s>" ,__UpperCAmelCase="<unk>" ,__UpperCAmelCase="<pad>" ,__UpperCAmelCase = None ,**__UpperCAmelCase ,) -> None:
A__ = {} if sp_model_kwargs is None else sp_model_kwargs
# Compatibility with the original tokenizer
A__ = 7
A__ = [f'''<madeupword{i}>''' for i in range(self.num_madeup_words )]
A__ = kwargs.get('additional_special_tokens' ,[] )
kwargs["additional_special_tokens"] += [
word for word in madeup_words if word not in kwargs["additional_special_tokens"]
]
super().__init__(
bos_token=__UpperCAmelCase ,eos_token=__UpperCAmelCase ,unk_token=__UpperCAmelCase ,sep_token=__UpperCAmelCase ,cls_token=__UpperCAmelCase ,pad_token=__UpperCAmelCase ,sp_model_kwargs=self.sp_model_kwargs ,**__UpperCAmelCase ,)
A__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__UpperCAmelCase ) )
A__ = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
A__ = 1
# Mimic fairseq token-to-id alignment for the first 4 token
A__ = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
A__ = len(self.sp_model )
A__ = {f'''<madeupword{i}>''': sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )}
self.fairseq_tokens_to_ids.update(__UpperCAmelCase )
A__ = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self ) -> Dict:
A__ = self.__dict__.copy()
A__ = None
A__ = self.sp_model.serialized_model_proto()
return state
def __setstate__( self ,__UpperCAmelCase ) -> Optional[Any]:
A__ = d
# for backward compatibility
if not hasattr(self ,'sp_model_kwargs' ):
A__ = {}
A__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase = None ) -> List[int]:
if token_ids_a is None:
return [self.sep_token_id] + token_ids_a
A__ = [self.sep_token_id]
return sep + token_ids_a + sep + sep + token_ids_a
def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase = None ,__UpperCAmelCase = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__UpperCAmelCase ,token_ids_a=__UpperCAmelCase ,already_has_special_tokens=__UpperCAmelCase )
if token_ids_a is None:
return [1] + ([0] * len(__UpperCAmelCase ))
return [1] + ([0] * len(__UpperCAmelCase )) + [1, 1] + ([0] * len(__UpperCAmelCase ))
def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase = None ) -> List[int]:
A__ = [self.sep_token_id]
if token_ids_a is None:
return len(sep + token_ids_a ) * [0]
return len(sep + token_ids_a + sep + sep + token_ids_a ) * [0]
@property
def snake_case__ ( self ) -> int:
return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words
def snake_case__ ( self ) -> Optional[Any]:
A__ = {self.convert_ids_to_tokens(__UpperCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def snake_case__ ( self ,__UpperCAmelCase ) -> List[str]:
return self.sp_model.encode(__UpperCAmelCase ,out_type=__UpperCAmelCase )
def snake_case__ ( self ,__UpperCAmelCase ) -> List[str]:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
A__ = self.sp_model.PieceToId(__UpperCAmelCase )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def snake_case__ ( self ,__UpperCAmelCase ) -> int:
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def snake_case__ ( self ,__UpperCAmelCase ) -> List[str]:
A__ = ''.join(__UpperCAmelCase ).replace(__UpperCAmelCase ,' ' ).strip()
return out_string
def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase = None ) -> Tuple[str]:
if not os.path.isdir(__UpperCAmelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
A__ = os.path.join(
__UpperCAmelCase ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file ,__UpperCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__UpperCAmelCase ,'wb' ) as fi:
A__ = self.sp_model.serialized_model_proto()
fi.write(__UpperCAmelCase )
return (out_vocab_file,)
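# A hedged usage sketch (not executable inside this relative-import module;
# run from an environment where `transformers` is installed, with network
# access for the checkpoint download):
#
#   from transformers import XGLMTokenizer
#   tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
#   ids = tokenizer("Hello world").input_ids
#   print(tokenizer.convert_ids_to_tokens(ids))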
import contextlib
import importlib
import io
import unittest

import transformers

# Try to import everything from transformers to ensure every object can be loaded.
from transformers import *  # noqa F406
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch
from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available


if is_torch_available():
    from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification

if is_tf_available():
    from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification

if is_flax_available():
    from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification


MODEL_ID = DUMMY_UNKNOWN_IDENTIFIER
# An actual model hosted on huggingface.co

REVISION_ID_DEFAULT = "main"
# Default branch name
REVISION_ID_ONE_SPECIFIC_COMMIT = "f2c752cfc5c0ab6f4bdec59acea69eefbee381c2"
# One particular commit (not the top of `main`)
REVISION_ID_INVALID = "aaaaaaa"
# This commit does not exist, so we should 404.

PINNED_SHA1 = "d9e9f15bc825e4b2c9249e9578f884bbcb5e3684"
# Sha-1 of config.json on the top of `main`, for checking purposes
PINNED_SHA256 = "4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3"
# Sha-256 of config.json on the top of `main`, for checking purposes


@contextlib.contextmanager
def context_en():
    print("Welcome!")
    yield
    print("Bye!")


@contextlib.contextmanager
def context_fr():
    print("Bonjour!")
    yield
    print("Au revoir!")


class TestImportMechanisms(unittest.TestCase):
    def test_module_spec_available(self):
        # If the spec is missing, importlib would not be able to import the module dynamically.
        assert transformers.__spec__ is not None
        assert importlib.util.find_spec("transformers") is not None


class GenericUtilTests(unittest.TestCase):
    @unittest.mock.patch("sys.stdout", new_callable=io.StringIO)
    def test_context_managers_no_context(self, mock_stdout):
        with ContextManagers([]):
            print("Transformers are awesome!")
        # The print statement adds a new line at the end of the output
        self.assertEqual(mock_stdout.getvalue(), "Transformers are awesome!\n")

    @unittest.mock.patch("sys.stdout", new_callable=io.StringIO)
    def test_context_managers_one_context(self, mock_stdout):
        with ContextManagers([context_en()]):
            print("Transformers are awesome!")
        # The output should be wrapped with an English welcome and goodbye
        self.assertEqual(mock_stdout.getvalue(), "Welcome!\nTransformers are awesome!\nBye!\n")

    @unittest.mock.patch("sys.stdout", new_callable=io.StringIO)
    def test_context_managers_two_context(self, mock_stdout):
        with ContextManagers([context_fr(), context_en()]):
            print("Transformers are awesome!")
        # The output should be wrapped with an English and French welcome and goodbye
        self.assertEqual(mock_stdout.getvalue(), "Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n")

    @require_torch
    def test_find_labels_pt(self):
        self.assertEqual(find_labels(BertForSequenceClassification), ["labels"])
        self.assertEqual(find_labels(BertForPreTraining), ["labels", "next_sentence_label"])
        self.assertEqual(find_labels(BertForQuestionAnswering), ["start_positions", "end_positions"])

        # find_labels works regardless of the class name (it detects the framework through inheritance)
        class DummyModel(BertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), ["labels"])

    @require_tf
    def test_find_labels_tf(self):
        self.assertEqual(find_labels(TFBertForSequenceClassification), ["labels"])
        self.assertEqual(find_labels(TFBertForPreTraining), ["labels", "next_sentence_label"])
        self.assertEqual(find_labels(TFBertForQuestionAnswering), ["start_positions", "end_positions"])

        class DummyModel(TFBertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), ["labels"])

    @require_flax
    def test_find_labels_flax(self):
        # Flax models don't have labels
        self.assertEqual(find_labels(FlaxBertForSequenceClassification), [])
        self.assertEqual(find_labels(FlaxBertForPreTraining), [])
        self.assertEqual(find_labels(FlaxBertForQuestionAnswering), [])

        class DummyModel(FlaxBertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), [])
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile

from transformers import M2M100Tokenizer, is_torch_available
from transformers.testing_utils import (
    get_tests_dir,
    nested_simplify,
    require_sentencepiece,
    require_tokenizers,
    require_torch,
    slow,
)
from transformers.utils import is_sentencepiece_available


if is_sentencepiece_available():
    from transformers.models.m2m_100.tokenization_m2m_100 import VOCAB_FILES_NAMES, save_json

from ...test_tokenization_common import TokenizerTesterMixin


if is_sentencepiece_available():
    SAMPLE_SP = get_tests_dir("fixtures/test_sentencepiece.model")

if is_torch_available():
    from transformers.models.m2m_100.modeling_m2m_100 import shift_tokens_right

EN_CODE = 128022
FR_CODE = 128028


@require_sentencepiece
class M2M100TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = M2M100Tokenizer
    test_rust_tokenizer = False
    test_seq2seq = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        vocab = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab_file"])
        if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["spm_file"])

        tokenizer = M2M100Tokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return M2M100Tokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return (
            "This is a test",
            "This is a test",
        )

    def test_convert_token_and_id(self):
        token = "</s>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        tokenizer = self.get_tokenizer()
        vocab_keys = list(tokenizer.get_vocab().keys())

        self.assertEqual(vocab_keys[0], "</s>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "<s>")
        self.assertEqual(len(vocab_keys), tokenizer.vocab_size + len(tokenizer.get_added_vocab()))

    @unittest.skip("Skip this test while all models are still to be uploaded.")
    def test_pretrained_model_lists(self):
        pass

    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [2, 3, 4, 5, 6],
        )

        back_tokens = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6])
        self.assertListEqual(back_tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        text = tokenizer.convert_tokens_to_string(tokens)
        self.assertEqual(text, "This is a test")

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
_snake_case = {"input_ids": [[128_022, 110_108, 397, 11, 38_272, 2_247, 124_811, 285, 18_105, 1_586, 207, 7, 39_534, 4_428, 397, 1_019, 18_105, 1_586, 207, 7, 41_337, 16_786, 241, 7, 20_214, 17, 125_690, 10_398, 7, 44_378, 58_069, 68_342, 7_798, 7_343, 11, 299, 33_310, 4, 158, 37_350, 94_077, 4_569, 299, 33_310, 90, 4, 52_840, 290, 4, 31_270, 112, 299, 682, 4, 52_840, 39_953, 14_079, 193, 52_519, 90_894, 17_894, 120_697, 11, 40_445, 551, 17, 1_019, 52_519, 90_894, 17_756, 963, 11, 40_445, 480, 17, 9_792, 1_120, 5_173, 1_393, 6_240, 16_786, 241, 120_996, 28, 1_245, 1_393, 118_240, 11_123, 1_019, 93_612, 2_691, 10_618, 98_058, 120_409, 1_928, 279, 4, 40_683, 367, 178, 207, 1_019, 103, 103_121, 506, 65_296, 5, 2], [128_022, 21_217, 367, 117, 125_450, 128, 719, 7, 7_308, 40, 93_612, 12_669, 1_116, 16_704, 71, 17_785, 3_699, 15_592, 35, 144, 9_584, 241, 11_943, 713, 950, 799, 2_247, 88_427, 150, 149, 118_813, 120_706, 1_019, 106_906, 81_518, 28, 1_224, 22_799, 397, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [128_022, 1_658, 123_311, 5_155, 5_578, 4_722, 279, 14_947, 2_366, 1_120, 1_197, 14, 1_348, 9_232, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=_snake_case,  # the fmt: off payload defined just above
            model_name="facebook/m2m100_418M",
            revision="c168bae485c864188cf9aa0e4108b0b6934dc91e",
        )
@require_torch
@require_sentencepiece
@require_tokenizers
class M2M100TokenizerIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/m2m100_418M"
    src_text = [
        "In my opinion, there are two levels of response from the French government.",
        "NSA Affair Emphasizes Complete Lack of Debate on Intelligence",
    ]
    tgt_text = [
        "Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.",
        "L'affaire NSA souligne l'absence totale de débat sur le renseignement",
    ]

    # fmt: off
    expected_src_tokens = [EN_CODE, 593, 1949, 115781, 4, 71586, 4234, 60633, 126233, 432, 123808, 15592, 1197, 117132, 120618, 5, 2]
    # fmt: on

    @classmethod
    def setUpClass(cls):
        cls.tokenizer: M2M100Tokenizer = M2M100Tokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en", tgt_lang="fr"
        )
        cls.pad_token_id = 1
        return cls

    def check_language_codes(self):
        self.assertEqual(self.tokenizer.get_lang_id("ar"), 128006)
        self.assertEqual(self.tokenizer.get_lang_id("en"), 128022)
        self.assertEqual(self.tokenizer.get_lang_id("ro"), 128076)
        self.assertEqual(self.tokenizer.get_lang_id("mr"), 128063)

    def test_get_vocab(self):
        vocab = self.tokenizer.get_vocab()
        self.assertEqual(len(vocab), self.tokenizer.vocab_size)
        self.assertEqual(vocab["<unk>"], 3)
        self.assertIn(self.tokenizer.get_lang_token("en"), vocab)

    def test_tokenizer_batch_encode_plus(self):
        self.tokenizer.src_lang = "en"
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)

    def test_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(FR_CODE, self.tokenizer.all_special_ids)
        # fmt: off
        generated_ids = [FR_CODE, 5364, 82, 8642, 4, 294, 47, 8, 14028, 136, 3286, 9706, 6, 90797, 6, 144012, 162, 88128, 30061, 5, 2]
        # fmt: on
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_french = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_french)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_special_tokens_unaffacted_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.lang_token_to_id
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = M2M100Tokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.lang_token_to_id, original_special_tokens)

    @require_torch
    def test_batch_fairseq_parity(self):
        self.tokenizer.src_lang = "en"
        self.tokenizer.tgt_lang = "fr"

        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")

        batch["decoder_input_ids"] = shift_tokens_right(
            batch["labels"], self.tokenizer.pad_token_id, self.tokenizer.eos_token_id
        )

        for k in batch:
            batch[k] = batch[k].tolist()
        # batch = {k: v.tolist() for k,v in batch.items()}
        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        # batch.decoder_inputs_ids[0][0] ==
        assert batch.input_ids[1][0] == EN_CODE
        assert batch.input_ids[1][-1] == 2
        assert batch.labels[1][0] == FR_CODE
        assert batch.labels[1][-1] == 2
        assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]

    @require_torch
    def test_src_lang_setter(self):
        self.tokenizer.src_lang = "mr"
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("mr")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])

        self.tokenizer.src_lang = "zh"
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("zh")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])

    @require_torch
    def test_tokenizer_target_mode(self):
        self.tokenizer.tgt_lang = "mr"
        self.tokenizer._switch_to_target_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("mr")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
        self.tokenizer._switch_to_input_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id(self.tokenizer.src_lang)])

        self.tokenizer.tgt_lang = "zh"
        self.tokenizer._switch_to_target_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("zh")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
        self.tokenizer._switch_to_input_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id(self.tokenizer.src_lang)])

    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs("A test", return_tensors="pt", src_lang="en", tgt_lang="ar")

        self.assertEqual(
            nested_simplify(inputs),
            {
                # en_XX, A, test, EOS
                "input_ids": [[128022, 58, 4183, 2]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 128006,
            },
        )
import argparse
import collections

import numpy as np
import torch
from flax import traverse_util
from t5x import checkpoints

from transformers import MT5Config, UMT5EncoderModel, UMT5ForConditionalGeneration
from transformers.utils import logging


logging.set_verbosity_info()


def t5x_relpos_bias_lookup(params, i, prefix):
    """Returns the Relative Position Bias parameters of a layer. Does not transpose."""
    return params[f"{prefix}/{prefix}/relpos_bias/rel_embedding"][:, i, :]


def t5x_attention_lookup(params, i, prefix, layer_name="attention"):
    """Returns the KOQV parameters of (self-)attention. Does not transpose."""
    k_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/key/kernel"][:, i, :, :])
    k = k_tmp.reshape(k_tmp.shape[0], k_tmp.shape[1] * k_tmp.shape[2])
    o_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/out/kernel"][:, i, :, :])
    o = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1], o_tmp.shape[2])
    q_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/query/kernel"][:, i, :, :])
    q = q_tmp.reshape(q_tmp.shape[0], q_tmp.shape[1] * q_tmp.shape[2])
    v_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/value/kernel"][:, i, :, :])
    v = v_tmp.reshape(v_tmp.shape[0], v_tmp.shape[1] * v_tmp.shape[2])
    return k, o, q, v


def t5x_mlp_lookup(params, i, prefix, split_mlp_wi=False):
    """Returns the MLP parameters of a layer. Does not transpose."""
    if split_mlp_wi:
        wi_0 = params[f"{prefix}/{prefix}/mlp/wi_0/kernel"][:, i, :]
        wi_1 = params[f"{prefix}/{prefix}/mlp/wi_1/kernel"][:, i, :]
        wi = (wi_0, wi_1)
    else:
        wi = params[f"{prefix}/{prefix}/mlp/wi/kernel"][:, i, :]
    wo = params[f"{prefix}/{prefix}/mlp/wo/kernel"][:, i, :]
    return wi, wo


def t5x_layer_norm_lookup(params, i, prefix, layer_name):
    """Returns the layer norm param of a layer."""
    return params[f"{prefix}/{prefix}/{layer_name}/scale"][:, i]


def convert_t5x_to_pytorch(
    variables: dict, *, num_layers: int, is_encoder_only: bool, scalable_attention: bool = False
):
    """Converts the parameters from T5X-Flax to Transformers-PyTorch naming."""
    old = traverse_util.flatten_dict(variables["target"])
    old = {"/".join(k): v for k, v in old.items()}

    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = "encoder/encoder/mlp/wi_0/kernel" in old
    print("Split MLP:", split_mlp_wi)

    new = collections.OrderedDict()

    # Shared embeddings.
    new["shared.weight"] = old["token_embedder/embedding"]

    # Encoder.
    for i in range(num_layers):
        # Block i, layer 0 (Self Attention).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_attention_layer_norm")
        k, o, q, v = t5x_attention_lookup(old, i, "encoder", "attention")
        new[f"encoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
        new[f"encoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

        # Block i, layer 1 (MLP).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_mlp_layer_norm")
        wi, wo = t5x_mlp_lookup(old, i, "encoder", split_mlp_wi)
        new[f"encoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
        if split_mlp_wi:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"] = wi[0].T
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"] = wi[1].T
        else:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi.weight"] = wi.T
        new[f"encoder.block.{i}.layer.1.DenseReluDense.wo.weight"] = wo.T
        if scalable_attention:
            # convert the rel_embedding of each layer
            new[f"encoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
                old, i, "encoder"
            ).T

    new["encoder.final_layer_norm.weight"] = old["encoder/encoder_norm/scale"]

    if not scalable_attention:
        new["encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
            old, 0, "encoder"
        ).T
        new["decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
            old, 0, "decoder"
        ).T

    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers):
            # Block i, layer 0 (Self Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_self_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "self_attention")
            new[f"decoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

            # Block i, layer 1 (Cross Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_cross_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "encoder_decoder_attention")
            new[f"decoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.1.EncDecAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.v.weight"] = v.T

            # Block i, layer 2 (MLP).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_mlp_layer_norm")
            wi, wo = t5x_mlp_lookup(old, i, "decoder", split_mlp_wi)
            new[f"decoder.block.{i}.layer.2.layer_norm.weight"] = layer_norm
            if split_mlp_wi:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"] = wi[0].T
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"] = wi[1].T
            else:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi.weight"] = wi.T
            new[f"decoder.block.{i}.layer.2.DenseReluDense.wo.weight"] = wo.T
            if scalable_attention:
                # convert the rel_embedding of each layer
                new[
                    f"decoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"
                ] = t5x_relpos_bias_lookup(old, i, "decoder").T

        new["decoder.final_layer_norm.weight"] = old["decoder/decoder_norm/scale"]

        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            new["lm_head.weight"] = old["decoder/logits_dense/kernel"].T

    return new


def make_state_dict(converted_params, is_encoder_only: bool):
    """Prepares a state dict for the PyTorch model."""
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])

    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]

    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]
        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print("Using shared word embeddings as lm_head.")
            state_dict["lm_head.weight"] = state_dict["shared.weight"]

    return state_dict


def load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only, scalable_attention):
    """Replaces the params in model with the T5X converted params."""
    variables = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    converted = convert_t5x_to_pytorch(
        variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only, scalable_attention=scalable_attention
    )
    state_dict = make_state_dict(converted, is_encoder_only)
    model.load_state_dict(state_dict, strict=True)


def convert_t5x_checkpoint_to_pytorch(
    t5x_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only: bool = False, scalable_attention: bool = False
):
    """Loads the config and model, converts the T5X checkpoint, and saves a PyTorch checkpoint."""
    config = MT5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = UMT5EncoderModel(config)
    else:
        model = UMT5ForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only, scalable_attention)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print("Done")


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Converts a native T5X checkpoint into a PyTorch checkpoint.")
    # Required parameters
    parser.add_argument(
        "--t5x_checkpoint_path", default=None, type=str, required=True, help="Path to the T5X checkpoint."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--is_encoder_only", action="store_true", help="Check if the model is encoder-decoder model", default=False
    )
    parser.add_argument(
        "--scalable_attention",
        action="store_true",
        help="Whether the model uses scaled attention (umt5 model)",
        default=False,
    )
    args = parser.parse_args()
    convert_t5x_checkpoint_to_pytorch(
        args.t5x_checkpoint_path,
        args.config_file,
        args.pytorch_dump_path,
        args.is_encoder_only,
        args.scalable_attention,
    )
"""simple docstring"""
__A = {}
def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->int:
"""simple docstring"""
if late == 3 or absent == 2:
return 0
# if we have no days left, and have not failed any other rules,
# we have a prize string
if days == 0:
return 1
# No easy solution, so now we need to do the recursive calculation
# First, check if the combination is already in the cache, and
# if yes, return the stored value from there since we already
# know the number of possible prize strings from this point on
lowerCAmelCase__ :Dict = (days, absent, late)
if key in cache:
return cache[key]
# now we calculate the three possible ways that can unfold from
# this point on, depending on our attendance today
# 1) if we are late (but not absent), the "absent" counter stays as
# it is, but the "late" counter increases by one
lowerCAmelCase__ :str = _calculate(days - 1 , lowerCamelCase_ , late + 1 )
# 2) if we are absent, the "absent" counter increases by 1, and the
# "late" counter resets to 0
lowerCAmelCase__ :int = _calculate(days - 1 , absent + 1 , 0 )
# 3) if we are on time, this resets the "late" counter and keeps the
# absent counter
lowerCAmelCase__ :Tuple = _calculate(days - 1 , lowerCamelCase_ , 0 )
lowerCAmelCase__ :Tuple = state_late + state_absent + state_ontime
lowerCAmelCase__ :str = prizestrings
return prizestrings
def __A (_SCREAMING_SNAKE_CASE = 30 ) ->int:
"""simple docstring"""
return _calculate(lowerCamelCase_ , absent=0 , late=0 )
if __name__ == "__main__":
print(solution())
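# Spot checks (added): the Project Euler 191 statement gives 43 valid strings
# over a 4-day period, and the published 30-day count is 1_918_080_160.
if __name__ == "__main__":
    assert solution(4) == 43
    assert solution() == 1_918_080_160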
def max_product_subarray(numbers: list) -> int:
    """Return the maximum product obtainable from a contiguous subarray of
    ``numbers`` (Kadane-style tracking of both max and min running products)."""
    if not numbers:
        return 0

    if not isinstance(numbers, (list, tuple)) or not all(
        isinstance(number, int) for number in numbers
    ):
        raise ValueError("numbers must be an iterable of integers")

    max_till_now = min_till_now = max_prod = numbers[0]

    for i in range(1, len(numbers)):
        # update the maximum and minimum subarray products
        number = numbers[i]
        if number < 0:
            # a negative factor swaps the roles of the running max and min
            max_till_now, min_till_now = min_till_now, max_till_now
        max_till_now = max(number, max_till_now * number)
        min_till_now = min(number, min_till_now * number)

        # update the maximum product found till now
        max_prod = max(max_prod, max_till_now)

    return max_prod
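# Example usage (added): the classic instance [2, 3, -2, 4] has maximum
# contiguous product 6 (the prefix [2, 3]); a zero resets the running product.
if __name__ == "__main__":
    assert max_product_subarray([2, 3, -2, 4]) == 6
    assert max_product_subarray([-2, 0, -1]) == 0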
import argparse
import glob
import logging
import os
from argparse import Namespace
from importlib import import_module

import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, TensorDataset
from utils_ner import TokenClassificationTask


logger = logging.getLogger(__name__)


class NERTransformer(BaseTransformer):
    """A training module for NER. See BaseTransformer for the core options."""

    mode = "token-classification"

    def __init__(self, hparams):
        if type(hparams) == dict:
            hparams = Namespace(**hparams)
        module = import_module("tasks")
        try:
            token_classification_task_clazz = getattr(module, hparams.task_type)
            self.token_classification_task: TokenClassificationTask = token_classification_task_clazz()
        except AttributeError:
            raise ValueError(
                f"Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. "
                f"Available tasks classes are: {TokenClassificationTask.__subclasses__()}"
            )
        self.labels = self.token_classification_task.get_labels(hparams.labels)
        self.pad_token_label_id = CrossEntropyLoss().ignore_index
        super().__init__(hparams, len(self.labels), self.mode)

    def forward(self, **inputs):
        return self.model(**inputs)

    def training_step(self, batch, batch_num):
        "Compute loss and log."
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
        if self.config.model_type != "distilbert":
            inputs["token_type_ids"] = (
                batch[2] if self.config.model_type in ["bert", "xlnet"] else None
            )  # XLM and RoBERTa don't use token_type_ids

        outputs = self(**inputs)
        loss = outputs[0]
        # tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]}
        return {"loss": loss}

    def prepare_data(self):
        "Called to initialize data. Use the call to construct features"
        args = self.hparams
        for mode in ["train", "dev", "test"]:
            cached_features_file = self._feature_file(mode)
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                logger.info("Loading features from cached file %s", cached_features_file)
                features = torch.load(cached_features_file)
            else:
                logger.info("Creating features from dataset file at %s", args.data_dir)
                examples = self.token_classification_task.read_examples_from_file(args.data_dir, mode)
                features = self.token_classification_task.convert_examples_to_features(
                    examples,
                    self.labels,
                    args.max_seq_length,
                    self.tokenizer,
                    cls_token_at_end=bool(self.config.model_type in ["xlnet"]),
                    cls_token=self.tokenizer.cls_token,
                    cls_token_segment_id=2 if self.config.model_type in ["xlnet"] else 0,
                    sep_token=self.tokenizer.sep_token,
                    sep_token_extra=False,
                    pad_on_left=bool(self.config.model_type in ["xlnet"]),
                    pad_token=self.tokenizer.pad_token_id,
                    pad_token_segment_id=self.tokenizer.pad_token_type_id,
                    pad_token_label_id=self.pad_token_label_id,
                )
                logger.info("Saving features into cached file %s", cached_features_file)
                torch.save(features, cached_features_file)

    def get_dataloader(self, mode: int, batch_size: int, shuffle: bool = False) -> DataLoader:
        "Load datasets. Called after prepare data."
        cached_features_file = self._feature_file(mode)
        logger.info("Loading features from cached file %s", cached_features_file)
        features = torch.load(cached_features_file)
        all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
        all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
        if features[0].token_type_ids is not None:
            all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
        else:
            all_token_type_ids = torch.tensor([0 for f in features], dtype=torch.long)
            # HACK(we will not use this anymore soon)
        all_label_ids = torch.tensor([f.label_ids for f in features], dtype=torch.long)
        return DataLoader(
            TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_label_ids), batch_size=batch_size
        )

    def validation_step(self, batch, batch_nb):
        """Compute validation."""
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
        if self.config.model_type != "distilbert":
            inputs["token_type_ids"] = (
                batch[2] if self.config.model_type in ["bert", "xlnet"] else None
            )  # XLM and RoBERTa don't use token_type_ids
        outputs = self(**inputs)
        tmp_eval_loss, logits = outputs[:2]
        preds = logits.detach().cpu().numpy()
        out_label_ids = inputs["labels"].detach().cpu().numpy()
        return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}

    def _eval_end(self, outputs):
        "Evaluation called for both Val and Test"
        val_loss_mean = torch.stack([x["val_loss"] for x in outputs]).mean()
        preds = np.concatenate([x["pred"] for x in outputs], axis=0)
        preds = np.argmax(preds, axis=2)
        out_label_ids = np.concatenate([x["target"] for x in outputs], axis=0)
        label_map = dict(enumerate(self.labels))
        out_label_list = [[] for _ in range(out_label_ids.shape[0])]
        preds_list = [[] for _ in range(out_label_ids.shape[0])]
        for i in range(out_label_ids.shape[0]):
            for j in range(out_label_ids.shape[1]):
                if out_label_ids[i, j] != self.pad_token_label_id:
                    out_label_list[i].append(label_map[out_label_ids[i][j]])
                    preds_list[i].append(label_map[preds[i][j]])

        results = {
            "val_loss": val_loss_mean,
            "accuracy_score": accuracy_score(out_label_list, preds_list),
            "precision": precision_score(out_label_list, preds_list),
            "recall": recall_score(out_label_list, preds_list),
            "f1": f1_score(out_label_list, preds_list),
        }

        ret = dict(results.items())
        ret["log"] = results
        return ret, preds_list, out_label_list

    def validation_epoch_end(self, outputs):
        # when stable
        ret, preds, targets = self._eval_end(outputs)
        logs = ret["log"]
        return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    def test_epoch_end(self, outputs):
        # updating to test_epoch_end instead of deprecated test_end
        ret, predictions, targets = self._eval_end(outputs)
        # Converting to the dict required by pl
        # https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\
        # pytorch_lightning/trainer/logging.py#L139
        logs = ret["log"]
        # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
        return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    @staticmethod
    def add_model_specific_args(parser, root_dir):
        # Add NER specific options
        BaseTransformer.add_model_specific_args(parser, root_dir)
        parser.add_argument(
            "--task_type", default="NER", type=str, help="Task type to fine tune in training (e.g. NER, POS, etc)"
        )
        parser.add_argument(
            "--max_seq_length",
            default=128,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--labels",
            default="",
            type=str,
            help="Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.",
        )
        parser.add_argument(
            "--gpus",
            default=0,
            type=int,
            help="The number of GPUs allocated for this, it is by default 0 meaning none",
        )
        parser.add_argument(
            "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
        )
        return parser


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    add_generic_args(parser, os.getcwd())
    parser = NERTransformer.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()
    model = NERTransformer(args)
    trainer = generic_train(model, args)

    if args.do_predict:
        # See https://github.com/huggingface/transformers/issues/3159
        # pl use this default format to create a checkpoint:
        # https://github.com/PyTorchLightning/pytorch-lightning/blob/master\
        # /pytorch_lightning/callbacks/model_checkpoint.py#L322
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "checkpoint-epoch=*.ckpt"), recursive=True))
        model = model.load_from_checkpoint(checkpoints[-1])
        trainer.test(model)
import argparse
import datetime


def zeller(date_input: str) -> str:
    """Find the day of the week for a date given as ``mm-dd-yyyy`` or
    ``mm/dd/yyyy`` using Zeller's congruence."""
    # Days of the week for response
    days = {
        "0": "Sunday",
        "1": "Monday",
        "2": "Tuesday",
        "3": "Wednesday",
        "4": "Thursday",
        "5": "Friday",
        "6": "Saturday",
    }

    convert_datetime_days = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}

    # Validate
    if not 0 < len(date_input) < 11:
        raise ValueError("Must be 10 characters long")

    # Get month
    m = int(date_input[0] + date_input[1])
    # Validate
    if not 0 < m < 13:
        raise ValueError("Month must be between 1 - 12")

    sep_1 = date_input[2]
    # Validate
    if sep_1 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")

    # Get day
    d = int(date_input[3] + date_input[4])
    # Validate
    if not 0 < d < 32:
        raise ValueError("Date must be between 1 - 31")

    # Get second separator
    sep_2 = date_input[5]
    # Validate
    if sep_2 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")

    # Get year
    y = int(date_input[6] + date_input[7] + date_input[8] + date_input[9])
    # Arbitrary year range
    if not 45 < y < 8500:
        raise ValueError(
            "Year out of range. There has to be some sort of limit...right?"
        )

    # Get datetime obj for validation
    dt_ck = datetime.date(int(y), int(m), int(d))

    # Start math
    if m <= 2:
        y = y - 1
        m = m + 12
    # maths var
    c = int(str(y)[:2])
    k = int(str(y)[2:])
    t = int(2.6 * m - 5.39)
    u = int(c / 4)
    v = int(k / 4)
    x = int(d + k)
    z = int(t + u + v + x)
    w = int(z - (2 * c))
    f = round(w % 7)
    # End math

    # Validate math
    if f != convert_datetime_days[dt_ck.weekday()]:
        raise AssertionError("The date was evaluated incorrectly. Contact developer.")

    # Response
    response = f"Your date {date_input}, is a {days[str(f)]}!"
    return response


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    parser = argparse.ArgumentParser(
        description=(
            "Find out what day of the week nearly any date is or was. Enter "
            "date as a string in the mm-dd-yyyy or mm/dd/yyyy format"
        )
    )
    parser.add_argument(
        "date_input", type=str, help="Date as a string (mm-dd-yyyy or mm/dd/yyyy)"
    )
    args = parser.parse_args()
    zeller(args.date_input)
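    # Example (added): 1 January 2000 fell on a Saturday, so the response for
    # "01-01-2000" reads "Your date 01-01-2000, is a Saturday!".
    assert "Saturday" in zeller("01-01-2000")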
def binomial_coefficient(n: int, k: int) -> int:
    """Return C(n, k), computed iteratively to keep intermediate values small."""
    result = 1  # To keep the calculated value
    # Since C(n, k) = C(n, n-k)
    if k > (n - k):
        k = n - k
    # Calculate C(n,k)
    for i in range(k):
        result *= n - i
        result //= i + 1
    return result


def catalan_number(node_count: int) -> int:
    """Return the number of possible binary search trees with ``node_count`` nodes."""
    return binomial_coefficient(2 * node_count, node_count) // (node_count + 1)


def factorial(n: int) -> int:
    """Return n factorial."""
    if n < 0:
        raise ValueError("factorial() not defined for negative values")
    result = 1
    for i in range(1, n + 1):
        result *= i
    return result


def binary_tree_count(node_count: int) -> int:
    """Return the number of possible binary trees with ``node_count`` nodes."""
    return catalan_number(node_count) * factorial(node_count)


if __name__ == "__main__":
    node_count = int(input("Enter the number of nodes: ").strip() or 0)
    if node_count <= 0:
        raise ValueError("We need some nodes to work with.")
    print(
        f"Given {node_count} nodes, there are {binary_tree_count(node_count)} "
        f"binary trees and {catalan_number(node_count)} binary search trees."
    )
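    # Quick check (added): the fifth Catalan number is 42, so five nodes admit
    # 42 binary search trees and 42 * 5! = 5040 binary trees.
    assert catalan_number(5) == 42
    assert binary_tree_count(5) == 5_040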
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
_snake_case = argparse.ArgumentParser('''Stable Diffusion script with intel optimization''', add_help=False)
parser.add_argument('''--dpm''', action='''store_true''', help='''Enable DPMSolver or not''')
parser.add_argument('''--steps''', default=None, type=int, help='''Num inference steps''')
args = parser.parse_args()
device = "cpu"
prompt = "a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings"
model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to(device)
# to channels last
pipe.unet = pipe.unet.to(memory_format=torch.channels_last)
pipe.vae = pipe.vae.to(memory_format=torch.channels_last)
pipe.text_encoder = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
    pipe.safety_checker = pipe.safety_checker.to(memory_format=torch.channels_last)
# optimize with ipex
sample = torch.randn(2, 4, 64, 64)
timestep = torch.rand(1) * 999
encoder_hidden_status = torch.randn(2, 77, 768)
input_example = (sample, timestep, encoder_hidden_status)
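# These dummy tensors only pin down shapes and dtypes for the JIT trace that
# ipex.optimize runs below; their random values never affect generated images.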
try:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True, sample_input=input_example)
except Exception:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True)
pipe.vae = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloat16, inplace=True)
pipe.text_encoder = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloat16, inplace=True)
if pipe.requires_safety_checker:
    pipe.safety_checker = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloat16, inplace=True)
# compute
seed = 666
generator = torch.Generator(device).manual_seed(seed)
generate_kwargs = {"generator": generator}
if args.steps is not None:
    generate_kwargs["num_inference_steps"] = args.steps
with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
    image = pipe(prompt, **generate_kwargs).images[0]
# save image
image.save("generated.png")
| 283 | 0 |
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
lowercase__ :Any = logging.get_logger(__name__)
class lowercase ( _lowerCAmelCase ):
def __init__( self ,A__):
super().__init__()
lowercase = nn.ModuleList(SCREAMING_SNAKE_CASE_)
def A__ ( self ,A__ ,A__ ,A__ ,A__ ,A__ ,A__ = None ,A__ = None ,A__ = None ,A__ = None ,A__ = False ,A__ = True ,):
for i, (image, scale, controlnet) in enumerate(zip(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,self.nets)):
lowercase = controlnet(
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,)
# merge samples
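            # the first ControlNet initialises the residuals; each later one is
            # added elementwise, so all nets share a single residual stack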
if i == 0:
lowercase = down_samples, mid_sample
else:
lowercase = [
samples_prev + samples_curr
for samples_prev, samples_curr in zip(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_)
]
mid_block_res_sample += mid_sample
return down_block_res_samples, mid_block_res_sample
def A__ ( self ,A__ ,A__ = True ,A__ = None ,A__ = False ,A__ = None ,):
lowercase = 0
lowercase = save_directory
for controlnet in self.nets:
controlnet.save_pretrained(
SCREAMING_SNAKE_CASE_ ,is_main_process=SCREAMING_SNAKE_CASE_ ,save_function=SCREAMING_SNAKE_CASE_ ,safe_serialization=SCREAMING_SNAKE_CASE_ ,variant=SCREAMING_SNAKE_CASE_ ,)
idx += 1
lowercase = model_path_to_save + f'_{idx}'
@classmethod
def A__ ( cls ,A__ ,**A__):
lowercase = 0
lowercase = []
# load controlnet and append to list until no controlnet directory exists anymore
        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_pretrained`
# second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
lowercase = pretrained_model_path
while os.path.isdir(SCREAMING_SNAKE_CASE_):
lowercase = ControlNetModel.from_pretrained(SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_)
controlnets.append(SCREAMING_SNAKE_CASE_)
idx += 1
lowercase = pretrained_model_path + f'_{idx}'
logger.info(f'{len(SCREAMING_SNAKE_CASE_)} controlnets loaded from {pretrained_model_path}.')
if len(SCREAMING_SNAKE_CASE_) == 0:
raise ValueError(
f'No ControlNets found under {os.path.dirname(SCREAMING_SNAKE_CASE_)}. Expected at least {pretrained_model_path + "_0"}.')
return cls(SCREAMING_SNAKE_CASE_)
| 366 |
from statistics import mean
import numpy as np
def calculate_turn_around_time(process_name: list, arrival_time: list, burst_time: list, no_of_process: int) -> list:
    """Calculate the turnaround time of each process using HRRN scheduling."""
    current_time = 0
    # Number of processes finished
    finished_process_count = 0
    # Marks each process: 0 while it is still waiting, 1 once it has finished.
    finished_process = [0] * no_of_process
    # List to include calculation results
    turn_around_time = [0] * no_of_process
    # Sort by arrival time.
    burst_time = [burst_time[i] for i in np.argsort(arrival_time)]
    process_name = [process_name[i] for i in np.argsort(arrival_time)]
arrival_time.sort()
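    # HRRN: among the ready processes, run the one with the highest response
    # ratio (waiting time + burst time) / burst time, computed in the loop below.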
    while no_of_process > finished_process_count:
        i = 0
        while finished_process[i] == 1:
            i += 1
        if current_time < arrival_time[i]:
            current_time = arrival_time[i]
        response_ratio = 0
        # Index showing the location of the process being performed
        loc = 0
        # Saves the current response ratio.
        temp = 0
        for i in range(0, no_of_process):
            if finished_process[i] == 0 and arrival_time[i] <= current_time:
                temp = (burst_time[i] + (current_time - arrival_time[i])) / burst_time[i]
            if response_ratio < temp:
                response_ratio = temp
                loc = i
        # Calculate the turn around time
        turn_around_time[loc] = current_time + burst_time[loc] - arrival_time[loc]
        current_time += burst_time[loc]
        # Indicates that the process has been performed.
        finished_process[loc] = 1
        # Increase finished_process_count by 1
        finished_process_count += 1
return turn_around_time
def calculate_waiting_time(process_name: list, turn_around_time: list, burst_time: list, no_of_process: int) -> list:
    """Calculate the waiting time of each process."""
    waiting_time = [0] * no_of_process
    for i in range(0, no_of_process):
        waiting_time[i] = turn_around_time[i] - burst_time[i]
return waiting_time
if __name__ == "__main__":
    no_of_process = 5
    process_name = ["A", "B", "C", "D", "E"]
    arrival_time = [1, 2, 3, 4, 5]
    burst_time = [1, 2, 3, 4, 5]
    turn_around_time = calculate_turn_around_time(
        process_name, arrival_time, burst_time, no_of_process
    )
    waiting_time = calculate_waiting_time(
process_name, turn_around_time, burst_time, no_of_process
)
print("Process name \tArrival time \tBurst time \tTurn around time \tWaiting time")
for i in range(0, no_of_process):
print(
F'{process_name[i]}\t\t{arrival_time[i]}\t\t{burst_time[i]}\t\t'
F'{turn_around_time[i]}\t\t\t{waiting_time[i]}'
)
print(F'average waiting time : {mean(waiting_time):.5f}')
print(F'average turn around time : {mean(turn_around_time):.5f}')
| 97 | 0 |
"""simple docstring"""
def gcd(a: int, b: int) -> int:
    while a != 0:
        a, b = b % a, a
    return b
def find_mod_inverse(a: int, m: int) -> int:
    if gcd(a, m) != 1:
        msg = f"mod inverse of {a!r} and {m!r} does not exist"
        raise ValueError(msg)
    u1, u2, u3 = 1, 0, a
    v1, v2, v3 = 0, 1, m
    while v3 != 0:
        q = u3 // v3
        v1, v2, v3, u1, u2, u3 = (u1 - q * v1), (u2 - q * v2), (u3 - q * v3), v1, v2, v3
    return u1 % m
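# Example (extended Euclid): find_mod_inverse(7, 26) == 15, since (7 * 15) % 26 == 1.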
| 78 |
"""simple docstring"""
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def __lowerCamelCase ( a_ : Union[str, Any] , a_ : Tuple , a_ : str=None ) -> Union[str, Any]:
# set parameter of one layer
assert torch_layer.weight.shape == weight.shape, f'''{torch_layer} layer.weight does not match'''
__SCREAMING_SNAKE_CASE :Dict = nn.Parameter(a_ )
if bias is not None:
assert torch_layer.bias.shape == bias.shape, f'''{torch_layer} layer.bias does not match'''
__SCREAMING_SNAKE_CASE :Optional[int] = nn.Parameter(a_ )
def __lowerCamelCase ( a_ : Dict , a_ : str , a_ : Optional[int] ) -> Any:
# set torch weights for 1-to-1 comparison
__SCREAMING_SNAKE_CASE :List[Any] = np.asarray(weights[0] )
__SCREAMING_SNAKE_CASE :Optional[int] = np.asarray(weights[1] )
__SCREAMING_SNAKE_CASE :Optional[int] = np.asarray(weights[2] )
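    # the trax checkpoint appears to store attention weights per head; the
    # transpose/reshape below flattens them into the single 2D matrices that
    # HF's Linear layers expect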
set_param(
torch_layer.self_attention.query_key , torch.tensor(a_ ).transpose(1 , 2 ).contiguous().view(-1 , a_ ) , )
set_param(
torch_layer.self_attention.value , torch.tensor(a_ ).transpose(1 , 2 ).contiguous().view(-1 , a_ ) , )
set_param(
torch_layer.output.dense , torch.tensor(a_ ).view(-1 , a_ ).contiguous().transpose(0 , 1 ) , )
def __lowerCamelCase ( a_ : List[Any] , a_ : Dict , a_ : List[str] ) -> Union[str, Any]:
# set torch weights for 1-to-1 comparison
__SCREAMING_SNAKE_CASE :Union[str, Any] = np.asarray(weights[0] )
__SCREAMING_SNAKE_CASE :Union[str, Any] = np.asarray(weights[1] )
__SCREAMING_SNAKE_CASE :Any = np.asarray(weights[2] )
__SCREAMING_SNAKE_CASE :Dict = np.asarray(weights[3] )
set_param(
torch_layer.self_attention.query , torch.tensor(a_ ).transpose(1 , 2 ).contiguous().view(-1 , a_ ) , )
set_param(
torch_layer.self_attention.key , torch.tensor(a_ ).transpose(1 , 2 ).contiguous().view(-1 , a_ ) , )
set_param(
torch_layer.self_attention.value , torch.tensor(a_ ).transpose(1 , 2 ).contiguous().view(-1 , a_ ) , )
set_param(
torch_layer.output.dense , torch.tensor(a_ ).view(-1 , a_ ).contiguous().transpose(0 , 1 ) , )
def __lowerCamelCase ( a_ : Any , a_ : List[str] , a_ : Optional[int] ) -> Union[str, Any]:
# layernorm 1
__SCREAMING_SNAKE_CASE :Any = weights[0][0][0]
__SCREAMING_SNAKE_CASE :Union[str, Any] = np.asarray(layer_norm_a[0] )
__SCREAMING_SNAKE_CASE :Union[str, Any] = np.asarray(layer_norm_a[1] )
set_param(
torch_block.attention.layer_norm , torch.tensor(a_ ) , torch.tensor(a_ ) , )
# lsh weights + output
__SCREAMING_SNAKE_CASE :List[Any] = weights[0][1]
if len(a_ ) < 4:
set_layer_weights_in_torch_lsh(a_ , torch_block.attention , a_ )
else:
set_layer_weights_in_torch_local(a_ , torch_block.attention , a_ )
    # intermediate weights
__SCREAMING_SNAKE_CASE :List[Any] = weights[2][0][1][2]
# Chunked Feed Forward
if len(a_ ) == 4:
__SCREAMING_SNAKE_CASE :List[str] = intermediate_weights[2]
# layernorm 2
__SCREAMING_SNAKE_CASE :Tuple = np.asarray(intermediate_weights[0][0] )
__SCREAMING_SNAKE_CASE :Union[str, Any] = np.asarray(intermediate_weights[0][1] )
set_param(
torch_block.feed_forward.layer_norm , torch.tensor(a_ ) , torch.tensor(a_ ) , )
# intermediate dense
__SCREAMING_SNAKE_CASE :int = np.asarray(intermediate_weights[1][0] )
__SCREAMING_SNAKE_CASE :int = np.asarray(intermediate_weights[1][1] )
set_param(
torch_block.feed_forward.dense.dense , torch.tensor(a_ ).transpose(0 , 1 ).contiguous() , torch.tensor(a_ ) , )
# intermediate out
__SCREAMING_SNAKE_CASE :str = np.asarray(intermediate_weights[4][0] )
__SCREAMING_SNAKE_CASE :str = np.asarray(intermediate_weights[4][1] )
set_param(
torch_block.feed_forward.output.dense , torch.tensor(a_ ).transpose(0 , 1 ).contiguous() , torch.tensor(a_ ) , )
def __lowerCamelCase ( a_ : List[str] , a_ : str , a_ : List[Any] ) -> Optional[Any]:
# reformer model
__SCREAMING_SNAKE_CASE :Dict = torch_model.reformer
# word embeds
__SCREAMING_SNAKE_CASE :List[Any] = np.asarray(weights[1] )
set_param(
torch_model_reformer.embeddings.word_embeddings , torch.tensor(a_ ) , )
if isinstance(weights[3] , a_ ):
__SCREAMING_SNAKE_CASE :List[Any] = torch_model_reformer.embeddings.position_embeddings
for emb_idx in range(len(position_embeddings.weights ) ):
__SCREAMING_SNAKE_CASE :List[str] = np.asarray(weights[3][emb_idx][0] )
assert (
position_embeddings.weights[emb_idx].shape == emb_weights.shape
), f'''{position_embeddings[emb_idx]} emb does not match'''
__SCREAMING_SNAKE_CASE :str = nn.Parameter(torch.tensor(a_ ) )
__SCREAMING_SNAKE_CASE :Optional[int] = weights[5]
assert len(torch_model_reformer.encoder.layers ) * 4 == len(
a_ ), "HF and trax model do not have the same number of layers"
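    # the checkpoint stores four consecutive weight groups per HF layer, hence
    # the factor of 4 in the slicing below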
for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers ):
__SCREAMING_SNAKE_CASE :Dict = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
set_block_weights_in_torch(a_ , a_ , a_ )
# output layer norm
__SCREAMING_SNAKE_CASE :Optional[int] = np.asarray(weights[7][0] )
__SCREAMING_SNAKE_CASE :List[Any] = np.asarray(weights[7][1] )
set_param(
torch_model_reformer.encoder.layer_norm , torch.tensor(a_ ) , torch.tensor(a_ ) , )
# output embeddings
__SCREAMING_SNAKE_CASE :Optional[int] = np.asarray(weights[9][0] )
__SCREAMING_SNAKE_CASE :str = np.asarray(weights[9][1] )
set_param(
torch_model.lm_head.decoder , torch.tensor(a_ ).transpose(0 , 1 ).contiguous() , torch.tensor(a_ ) , )
def __lowerCamelCase ( a_ : Any , a_ : Dict , a_ : Dict ) -> Tuple:
# Initialise PyTorch model
__SCREAMING_SNAKE_CASE :List[str] = ReformerConfig.from_json_file(a_ )
print(f'''Building PyTorch model from configuration: {config}''' )
__SCREAMING_SNAKE_CASE :List[Any] = ReformerModelWithLMHead(a_ )
with open(a_ , '''rb''' ) as f:
__SCREAMING_SNAKE_CASE :Any = pickle.load(a_ )['''weights''']
set_model_weights_in_torch(a_ , a_ , config.hidden_size )
# Save pytorch-model
print(f'''Save PyTorch model to {pytorch_dump_path}''' )
torch.save(model.state_dict() , a_ )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--trax_model_pkl_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained Reformer model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
    convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
| 191 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'''configuration_conditional_detr''': [
'''CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''ConditionalDetrConfig''',
'''ConditionalDetrOnnxConfig''',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_conditional_detr"] = ["ConditionalDetrFeatureExtractor"]
    _import_structure["image_processing_conditional_detr"] = ["ConditionalDetrImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_conditional_detr"] = [
'''CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ConditionalDetrForObjectDetection''',
'''ConditionalDetrForSegmentation''',
'''ConditionalDetrModel''',
'''ConditionalDetrPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
ConditionalDetrConfig,
ConditionalDetrOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
from .image_processing_conditional_detr import ConditionalDetrImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrModel,
ConditionalDetrPreTrainedModel,
)
else:
import sys
UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 125 |
test_graph_1 = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
test_graph_2 = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}
def topology_sort(graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    """Return the DFS post-order (finishing order) starting from vert."""
    visited[vert] = True
    order = []
    for neighbour in graph[vert]:
        if not visited[neighbour]:
            order += topology_sort(graph, neighbour, visited)
    order.append(vert)
return order
def find_components(reversed_graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    """Collect every vertex reachable from vert in the reversed graph."""
    visited[vert] = True
    component = [vert]
    for neighbour in reversed_graph[vert]:
        if not visited[neighbour]:
            component += find_components(reversed_graph, neighbour, visited)
return component
def strongly_connected_components(graph: dict[int, list[int]]) -> list[list[int]]:
    """Return the strongly connected components of graph (Kosaraju's algorithm)."""
    visited = len(graph) * [False]
    reversed_graph = {vert: [] for vert in range(len(graph))}
    for vert, neighbours in graph.items():
        for neighbour in neighbours:
            reversed_graph[neighbour].append(vert)
    order = []
    for i, was_visited in enumerate(visited):
        if not was_visited:
            order += topology_sort(graph, i, visited)
    components_list = []
    visited = len(graph) * [False]
    for i in range(len(graph)):
        vert = order[len(graph) - i - 1]
        if not visited[vert]:
            component = find_components(reversed_graph, vert, visited)
            components_list.append(component)
return components_list
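# Example: strongly_connected_components(test_graph_1) groups the cycle
# 0 -> 2 -> 1 -> 0 into one component and leaves 3 and 4 as singletons.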
| 125 | 1 |
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def _a ( ) -> List[Any]:
"""simple docstring"""
    data_dict = {
'''repo_name''': ['''test_repo1''', '''test_repo2''', '''test_repo3'''],
'''path''': ['''test_1.py''', '''test_2.py''', '''unit_test.py'''],
'''content''': ['''a ''' * 20, '''a ''' * 30, '''b ''' * 7],
}
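    # the first two rows repeat the same token, so MinHash should flag them as
    # near-duplicates; the third row is distinct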
    dataset = Dataset.from_dict(data_dict)
    return dataset
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ):
def __lowerCamelCase ( self : List[Any] ) ->Optional[int]:
        ds = get_dataset()
        duplicate_clusters = make_duplicate_clusters(ds, 0.85)
self.assertEqual(len(duplicate_clusters[0] ) , 2 )
def __lowerCamelCase ( self : Optional[Any] ) ->Optional[int]:
        ds = get_dataset()
        ds_filter, duplicate_clusters = deduplicate_dataset(ds)
        self.assertEqual(len(ds_filter), 2)
        print(duplicate_clusters)
        self.assertEqual(duplicate_clusters[0][0]["copies"], 2)
        self.assertEqual(duplicate_clusters[0][0]["is_extreme"], True)
| 142 |
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ):
_UpperCAmelCase : Optional[int] = ["image_processor", "tokenizer"]
_UpperCAmelCase : Dict = "BridgeTowerImageProcessor"
_UpperCAmelCase : Dict = ("RobertaTokenizer", "RobertaTokenizerFast")
def __init__( self : Any , A : List[Any] , A : Tuple ) ->Dict:
super().__init__(A , A )
def __call__( self : str , A : int , A : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , A : bool = True , A : Union[bool, str, PaddingStrategy] = False , A : Union[bool, str, TruncationStrategy] = None , A : Optional[int] = None , A : int = 0 , A : Optional[int] = None , A : Optional[bool] = None , A : Optional[bool] = None , A : bool = False , A : bool = False , A : bool = False , A : bool = False , A : bool = True , A : Optional[Union[str, TensorType]] = None , **A : Union[str, Any] , ) ->BatchEncoding:
lowerCamelCase__ : Optional[int] = self.tokenizer(
text=A , add_special_tokens=A , padding=A , truncation=A , max_length=A , stride=A , pad_to_multiple_of=A , return_token_type_ids=A , return_attention_mask=A , return_overflowing_tokens=A , return_special_tokens_mask=A , return_offsets_mapping=A , return_length=A , verbose=A , return_tensors=A , **A , )
# add pixel_values + pixel_mask
lowerCamelCase__ : int = self.image_processor(
A , return_tensors=A , do_normalize=A , do_center_crop=A , **A )
encoding.update(A )
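        # merge pixel_values / pixel_mask into the text encoding so a single
        # dict can be fed to the model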
return encoding
def __lowerCamelCase ( self : str , *A : Dict , **A : List[str] ) ->List[Any]:
return self.tokenizer.batch_decode(*A , **A )
def __lowerCamelCase ( self : List[str] , *A : Optional[Any] , **A : Tuple ) ->Dict:
return self.tokenizer.decode(*A , **A )
@property
def __lowerCamelCase ( self : str ) ->Optional[Any]:
lowerCamelCase__ : Optional[int] = self.tokenizer.model_input_names
lowerCamelCase__ : Optional[int] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 142 | 1 |
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE_ = logging.get_logger()
@dataclass
class a :
_lowercase = 42
_lowercase = field(default_factory=UpperCAmelCase )
_lowercase = field(default_factory=UpperCAmelCase )
def _UpperCAmelCase ( self , A_ , A_ , A_ ):
'''simple docstring'''
        _UpperCAmelCase : Tuple = len(list(m.modules() ) ) == 1 or isinstance(A_ , nn.Conv2d ) or isinstance(A_ , nn.BatchNorm2d )
if has_not_submodules:
self.traced.append(A_ )
def __call__( self , A_ ):
'''simple docstring'''
for m in self.module.modules():
self.handles.append(m.register_forward_hook(self._forward_hook ) )
self.module(A_ )
[x.remove() for x in self.handles]
return self
@property
def _UpperCAmelCase ( self ):
'''simple docstring'''
return list(filter(lambda A_ : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) )
@dataclass
class a :
_lowercase = 42
_lowercase = 42
_lowercase = 0
_lowercase = field(default_factory=UpperCAmelCase )
_lowercase = field(default_factory=UpperCAmelCase )
def __call__( self , A_ ):
'''simple docstring'''
_UpperCAmelCase : List[str] = Tracker(self.dest )(A_ ).parametrized
_UpperCAmelCase : List[Any] = Tracker(self.src )(A_ ).parametrized
_UpperCAmelCase : Union[str, Any] = list(filter(lambda A_ : type(A_ ) not in self.src_skip , A_ ) )
_UpperCAmelCase : Optional[Any] = list(filter(lambda A_ : type(A_ ) not in self.dest_skip , A_ ) )
if len(A_ ) != len(A_ ):
raise Exception(
f'Numbers of operations are different. Source module has {len(A_ )} operations while'
f' destination module has {len(A_ )}.' )
for dest_m, src_m in zip(A_ , A_ ):
dest_m.load_state_dict(src_m.state_dict() )
if self.verbose == 1:
print(f'Transfered from={src_m} to={dest_m}' )
def __SCREAMING_SNAKE_CASE ( lowerCAmelCase: str , lowerCAmelCase: ResNetConfig , lowerCAmelCase: Path , lowerCAmelCase: bool = True ) -> int:
print(F'Converting {name}...' )
with torch.no_grad():
_UpperCAmelCase : Union[str, Any] = timm.create_model(lowerCAmelCase , pretrained=lowerCAmelCase ).eval()
_UpperCAmelCase : Optional[Any] = ResNetForImageClassification(lowerCAmelCase ).eval()
_UpperCAmelCase : Optional[int] = ModuleTransfer(src=lowerCAmelCase , dest=lowerCAmelCase )
_UpperCAmelCase : Tuple = torch.randn((1, 3, 224, 224) )
module_transfer(lowerCAmelCase )
assert torch.allclose(from_model(lowerCAmelCase ) , our_model(lowerCAmelCase ).logits ), "The model logits don't match the original one."
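    # a random forward pass through both models is an end-to-end check that
    # ModuleTransfer copied every parameter correctly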
_UpperCAmelCase : Dict = F'resnet{"-".join(name.split("resnet" ) )}'
print(lowerCAmelCase )
if push_to_hub:
our_model.push_to_hub(
repo_path_or_name=save_directory / checkpoint_name , commit_message="Add model" , use_temp_dir=lowerCAmelCase , )
# we can use the convnext one
_UpperCAmelCase : Union[str, Any] = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k" )
image_processor.push_to_hub(
repo_path_or_name=save_directory / checkpoint_name , commit_message="Add image processor" , use_temp_dir=lowerCAmelCase , )
print(F'Pushed {checkpoint_name}' )
def __SCREAMING_SNAKE_CASE ( lowerCAmelCase: Path , lowerCAmelCase: str = None , lowerCAmelCase: bool = True ) -> Optional[Any]:
_UpperCAmelCase : List[Any] = "imagenet-1k-id2label.json"
_UpperCAmelCase : Tuple = 1000
_UpperCAmelCase : str = (1, num_labels)
_UpperCAmelCase : List[Any] = "huggingface/label-files"
_UpperCAmelCase : List[Any] = num_labels
_UpperCAmelCase : int = json.load(open(hf_hub_download(lowerCAmelCase , lowerCAmelCase , repo_type="dataset" ) , "r" ) )
_UpperCAmelCase : Union[str, Any] = {int(lowerCAmelCase ): v for k, v in idalabel.items()}
_UpperCAmelCase : int = idalabel
_UpperCAmelCase : Optional[int] = {v: k for k, v in idalabel.items()}
_UpperCAmelCase : List[str] = partial(lowerCAmelCase , num_labels=lowerCAmelCase , idalabel=lowerCAmelCase , labelaid=lowerCAmelCase )
_UpperCAmelCase : Union[str, Any] = {
"resnet18": ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[64, 128, 256, 512] , layer_type="basic" ),
"resnet26": ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[256, 512, 1024, 2048] , layer_type="bottleneck" ),
"resnet34": ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[64, 128, 256, 512] , layer_type="basic" ),
"resnet50": ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type="bottleneck" ),
"resnet101": ImageNetPreTrainedConfig(
depths=[3, 4, 23, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type="bottleneck" ),
"resnet152": ImageNetPreTrainedConfig(
depths=[3, 8, 36, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type="bottleneck" ),
}
if model_name:
convert_weight_and_push(lowerCAmelCase , names_to_config[model_name] , lowerCAmelCase , lowerCAmelCase )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
return config, expected_shape
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default=None,
type=str,
help=(
'The name of the model you wish to convert, it must be one of the supported resnet* architecture,'
' currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=Path,
required=True,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
default=True,
type=bool,
required=False,
help='If True, push model and image processor to the hub.',
)
    args = parser.parse_args()
    pytorch_dump_folder_path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 189 |
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class a ( UpperCAmelCase ):
_lowercase = ["image_processor", "tokenizer"]
_lowercase = "OwlViTImageProcessor"
_lowercase = ("CLIPTokenizer", "CLIPTokenizerFast")
def __init__( self , A_=None , A_=None , **A_ ):
'''simple docstring'''
_UpperCAmelCase : List[str] = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , A_ , )
_UpperCAmelCase : Union[str, Any] = kwargs.pop("feature_extractor" )
_UpperCAmelCase : Any = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(A_ , A_ )
def __call__( self , A_=None , A_=None , A_=None , A_="max_length" , A_="np" , **A_ ):
'''simple docstring'''
if text is None and query_images is None and images is None:
raise ValueError(
"You have to specify at least one text or query image or image. All three cannot be none." )
if text is not None:
if isinstance(A_ , A_ ) or (isinstance(A_ , A_ ) and not isinstance(text[0] , A_ )):
_UpperCAmelCase : Optional[int] = [self.tokenizer(A_ , padding=A_ , return_tensors=A_ , **A_ )]
elif isinstance(A_ , A_ ) and isinstance(text[0] , A_ ):
_UpperCAmelCase : Optional[int] = []
# Maximum number of queries across batch
_UpperCAmelCase : Optional[Any] = max([len(A_ ) for t in text] )
# Pad all batch samples to max number of text queries
for t in text:
if len(A_ ) != max_num_queries:
_UpperCAmelCase : Optional[int] = t + [" "] * (max_num_queries - len(A_ ))
_UpperCAmelCase : str = self.tokenizer(A_ , padding=A_ , return_tensors=A_ , **A_ )
encodings.append(A_ )
else:
raise TypeError("Input text should be a string, a list of strings or a nested list of strings" )
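            # merge the per-image query encodings along the batch axis in the
            # requested tensor framework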
if return_tensors == "np":
_UpperCAmelCase : List[str] = np.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 )
_UpperCAmelCase : Tuple = np.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 )
elif return_tensors == "jax" and is_flax_available():
import jax.numpy as jnp
_UpperCAmelCase : Optional[Any] = jnp.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 )
_UpperCAmelCase : str = jnp.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 )
elif return_tensors == "pt" and is_torch_available():
import torch
_UpperCAmelCase : str = torch.cat([encoding["input_ids"] for encoding in encodings] , dim=0 )
_UpperCAmelCase : Dict = torch.cat([encoding["attention_mask"] for encoding in encodings] , dim=0 )
elif return_tensors == "tf" and is_tf_available():
import tensorflow as tf
_UpperCAmelCase : Union[str, Any] = tf.stack([encoding["input_ids"] for encoding in encodings] , axis=0 )
_UpperCAmelCase : Optional[int] = tf.stack([encoding["attention_mask"] for encoding in encodings] , axis=0 )
else:
raise ValueError("Target return tensor type could not be returned" )
_UpperCAmelCase : Optional[int] = BatchEncoding()
_UpperCAmelCase : str = input_ids
_UpperCAmelCase : Optional[Any] = attention_mask
if query_images is not None:
_UpperCAmelCase : int = BatchEncoding()
_UpperCAmelCase : str = self.image_processor(
A_ , return_tensors=A_ , **A_ ).pixel_values
_UpperCAmelCase : Optional[Any] = query_pixel_values
if images is not None:
_UpperCAmelCase : int = self.image_processor(A_ , return_tensors=A_ , **A_ )
if text is not None and images is not None:
_UpperCAmelCase : Optional[int] = image_features.pixel_values
return encoding
elif query_images is not None and images is not None:
_UpperCAmelCase : Any = image_features.pixel_values
return encoding
elif text is not None or query_images is not None:
return encoding
else:
return BatchEncoding(data=dict(**A_ ) , tensor_type=A_ )
def _UpperCAmelCase ( self , *A_ , **A_ ):
'''simple docstring'''
return self.image_processor.post_process(*A_ , **A_ )
def _UpperCAmelCase ( self , *A_ , **A_ ):
'''simple docstring'''
return self.image_processor.post_process_object_detection(*A_ , **A_ )
def _UpperCAmelCase ( self , *A_ , **A_ ):
'''simple docstring'''
return self.image_processor.post_process_image_guided_detection(*A_ , **A_ )
def _UpperCAmelCase ( self , *A_ , **A_ ):
'''simple docstring'''
return self.tokenizer.batch_decode(*A_ , **A_ )
def _UpperCAmelCase ( self , *A_ , **A_ ):
'''simple docstring'''
return self.tokenizer.decode(*A_ , **A_ )
@property
def _UpperCAmelCase ( self ):
'''simple docstring'''
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , A_ , )
return self.image_processor_class
@property
def _UpperCAmelCase ( self ):
'''simple docstring'''
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , A_ , )
return self.image_processor
| 189 | 1 |
import argparse
import torch
from torch import nn
from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration
def __a ( SCREAMING_SNAKE_CASE ) -> Optional[Any]:
'''simple docstring'''
__UpperCAmelCase = [
'''encoder.version''',
'''decoder.version''',
'''model.encoder.version''',
'''model.decoder.version''',
'''decoder.output_projection.weight''',
'''_float_tensor''',
'''encoder.embed_positions._float_tensor''',
'''decoder.embed_positions._float_tensor''',
]
for k in ignore_keys:
state_dict.pop(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def __a ( SCREAMING_SNAKE_CASE ) -> Any:
'''simple docstring'''
__UpperCAmelCase , __UpperCAmelCase = emb.weight.shape
__UpperCAmelCase = nn.Linear(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , bias=SCREAMING_SNAKE_CASE )
__UpperCAmelCase = emb.weight.data
return lin_layer
def __a ( SCREAMING_SNAKE_CASE ) -> List[str]:
'''simple docstring'''
__UpperCAmelCase = torch.load(SCREAMING_SNAKE_CASE , map_location='''cpu''' )
__UpperCAmelCase = mam_aaa['''args'''] or mam_aaa['''cfg''']['''model''']
__UpperCAmelCase = mam_aaa['''model''']
remove_ignore_keys_(SCREAMING_SNAKE_CASE )
__UpperCAmelCase = state_dict['''encoder.embed_tokens.weight'''].shape[0]
__UpperCAmelCase = MaMaaaConfig(
vocab_size=SCREAMING_SNAKE_CASE , max_position_embeddings=1_0_2_4 , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , encoder_layerdrop=args.encoder_layerdrop , decoder_layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function='''relu''' , )
__UpperCAmelCase = state_dict['''decoder.embed_tokens.weight''']
__UpperCAmelCase = MaMaaaForConditionalGeneration(SCREAMING_SNAKE_CASE )
model.model.load_state_dict(SCREAMING_SNAKE_CASE , strict=SCREAMING_SNAKE_CASE )
__UpperCAmelCase = make_linear_from_emb(model.model.shared )
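    # tie the LM head to the shared embedding matrix so output logits reuse
    # the input embeddings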
return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('fairseq_path', type=str, help='path to a model.pt on local filesystem.')
    parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    args = parser.parse_args()
    model = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
| 333 |
def matching_min_vertex_cover(graph: dict) -> set:
    """Approximate a minimum vertex cover via a maximal matching."""
    chosen_vertices = set()
    # edges = list of graph's edges
    edges = get_edges(graph)
    # While there are still edges left, take an arbitrary edge (from_node,
    # to_node), add both of its endpoints to chosen_vertices, and then remove
    # all edges adjacent to from_node and to_node
while edges:
        from_node, to_node = edges.pop()
        chosen_vertices.add(from_node)
        chosen_vertices.add(to_node)
for edge in edges.copy():
if from_node in edge or to_node in edge:
                edges.discard(edge)
return chosen_vertices
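# Taking both endpoints of arbitrarily chosen edges is the classic matching-based
# 2-approximation: the cover is at most twice the size of a minimum vertex cover.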
def get_edges(graph: dict) -> set:
    """Return the set of (from_node, to_node) edges of the graph."""
    edges = set()
for from_node, to_nodes in graph.items():
for to_node in to_nodes:
edges.add((from_node, to_node) )
return edges
if __name__ == "__main__":
import doctest
doctest.testmod()
# graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
# print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
| 333 | 1 |
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class lowercase :
lowercase__ : Dict = LEDConfig
lowercase__ : List[str] = {}
lowercase__ : Union[str, Any] = """gelu"""
def __init__( self : Tuple , _UpperCamelCase : List[str] , _UpperCamelCase : Dict=13 , _UpperCamelCase : Optional[int]=7 , _UpperCamelCase : int=True , _UpperCamelCase : List[Any]=False , _UpperCamelCase : Dict=99 , _UpperCamelCase : Optional[Any]=32 , _UpperCamelCase : Any=2 , _UpperCamelCase : List[str]=4 , _UpperCamelCase : Union[str, Any]=37 , _UpperCamelCase : str=0.1 , _UpperCamelCase : List[Any]=0.1 , _UpperCamelCase : Union[str, Any]=20 , _UpperCamelCase : str=2 , _UpperCamelCase : Optional[Any]=1 , _UpperCamelCase : Optional[int]=0 , _UpperCamelCase : int=4 , ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE = parent
SCREAMING_SNAKE_CASE = batch_size
SCREAMING_SNAKE_CASE = seq_length
SCREAMING_SNAKE_CASE = is_training
SCREAMING_SNAKE_CASE = use_labels
SCREAMING_SNAKE_CASE = vocab_size
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = intermediate_size
SCREAMING_SNAKE_CASE = hidden_dropout_prob
SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE = max_position_embeddings
SCREAMING_SNAKE_CASE = eos_token_id
SCREAMING_SNAKE_CASE = pad_token_id
SCREAMING_SNAKE_CASE = bos_token_id
SCREAMING_SNAKE_CASE = attention_window
# `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
# [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
# returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
# because its local attention only attends to `self.attention_window` and one before and one after
SCREAMING_SNAKE_CASE = self.attention_window + 2
# because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
# the `test_attention_outputs` and `test_hidden_states_output` tests
SCREAMING_SNAKE_CASE = (
self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
)
def __snake_case( self : int ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
SCREAMING_SNAKE_CASE = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
SCREAMING_SNAKE_CASE = tf.concat([input_ids, eos_tensor] , axis=1 )
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , attention_window=self.attention_window , **self.config_updates , )
SCREAMING_SNAKE_CASE = prepare_led_inputs_dict(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
SCREAMING_SNAKE_CASE = tf.concat(
[tf.zeros_like(_UpperCamelCase )[:, :-1], tf.ones_like(_UpperCamelCase )[:, -1:]] , axis=-1 , )
SCREAMING_SNAKE_CASE = global_attention_mask
return config, inputs_dict
def __snake_case( self : Any , _UpperCamelCase : Optional[int] , _UpperCamelCase : Tuple ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE = TFLEDModel(config=_UpperCamelCase ).get_decoder()
SCREAMING_SNAKE_CASE = inputs_dict["input_ids"]
SCREAMING_SNAKE_CASE = input_ids[:1, :]
SCREAMING_SNAKE_CASE = inputs_dict["attention_mask"][:1, :]
SCREAMING_SNAKE_CASE = 1
# first forward pass
SCREAMING_SNAKE_CASE = model(_UpperCamelCase , attention_mask=_UpperCamelCase , use_cache=_UpperCamelCase )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
SCREAMING_SNAKE_CASE = ids_tensor((self.batch_size, 3) , config.vocab_size )
        SCREAMING_SNAKE_CASE = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.int8 )
        # append to next input_ids and attention_mask
SCREAMING_SNAKE_CASE = tf.concat([input_ids, next_tokens] , axis=-1 )
SCREAMING_SNAKE_CASE = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
SCREAMING_SNAKE_CASE = model(_UpperCamelCase , attention_mask=_UpperCamelCase )[0]
SCREAMING_SNAKE_CASE = model(_UpperCamelCase , attention_mask=_UpperCamelCase , past_key_values=_UpperCamelCase )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
SCREAMING_SNAKE_CASE = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
SCREAMING_SNAKE_CASE = output_from_no_past[:, -3:, random_slice_idx]
SCREAMING_SNAKE_CASE = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(_UpperCamelCase , _UpperCamelCase , rtol=1e-3 )
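        # matching sliced logits shows that the cached past_key_values path
        # agrees with recomputing the full sequence from scratch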
def __lowerCamelCase (UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[str]=None , UpperCAmelCase__ : Union[str, Any]=None , UpperCAmelCase__ : Optional[int]=None , UpperCAmelCase__ : Union[str, Any]=None , ):
if attention_mask is None:
        SCREAMING_SNAKE_CASE = tf.cast(tf.math.not_equal(UpperCAmelCase__ , config.pad_token_id ) , tf.int8 )
if decoder_attention_mask is None:
SCREAMING_SNAKE_CASE = tf.concat(
[
                tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.int8 ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.int8 ),
] , axis=-1 , )
if head_mask is None:
SCREAMING_SNAKE_CASE = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
SCREAMING_SNAKE_CASE = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"attention_mask": attention_mask,
"decoder_input_ids": decoder_input_ids,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
}
@require_tf
class lowercase ( a , a , unittest.TestCase ):
lowercase__ : Optional[int] = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
lowercase__ : List[Any] = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
lowercase__ : int = (
{
"""conversational""": TFLEDForConditionalGeneration,
"""feature-extraction""": TFLEDModel,
"""summarization""": TFLEDForConditionalGeneration,
"""text2text-generation""": TFLEDForConditionalGeneration,
"""translation""": TFLEDForConditionalGeneration,
}
if is_tf_available()
else {}
)
lowercase__ : List[Any] = True
lowercase__ : List[str] = False
lowercase__ : List[str] = False
lowercase__ : Union[str, Any] = False
def __snake_case( self : Tuple ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE = TFLEDModelTester(self )
SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=_UpperCamelCase )
def __snake_case( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
self.config_tester.run_common_tests()
def __snake_case( self : List[str] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*_UpperCamelCase )
def __snake_case( self : Dict ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE = tf.zeros_like(inputs_dict["attention_mask"] )
SCREAMING_SNAKE_CASE = 2
SCREAMING_SNAKE_CASE = tf.where(
tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict["global_attention_mask"] , )
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = self.model_tester.seq_length
SCREAMING_SNAKE_CASE = self.model_tester.encoder_seq_length
def check_decoder_attentions_output(_UpperCamelCase : Dict ):
SCREAMING_SNAKE_CASE = outputs.decoder_attentions
self.assertEqual(len(_UpperCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
def check_encoder_attentions_output(_UpperCamelCase : Optional[Any] ):
SCREAMING_SNAKE_CASE = [t.numpy() for t in outputs.encoder_attentions]
SCREAMING_SNAKE_CASE = [t.numpy() for t in outputs.encoder_global_attentions]
self.assertEqual(len(_UpperCamelCase ) , self.model_tester.num_hidden_layers )
self.assertEqual(len(_UpperCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
self.assertListEqual(
list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , )
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = model_class(_UpperCamelCase )
SCREAMING_SNAKE_CASE = model(self._prepare_for_class(_UpperCamelCase , _UpperCamelCase ) )
SCREAMING_SNAKE_CASE = len(_UpperCamelCase )
self.assertEqual(config.output_hidden_states , _UpperCamelCase )
check_encoder_attentions_output(_UpperCamelCase )
if self.is_encoder_decoder:
SCREAMING_SNAKE_CASE = model_class(_UpperCamelCase )
SCREAMING_SNAKE_CASE = model(self._prepare_for_class(_UpperCamelCase , _UpperCamelCase ) )
self.assertEqual(config.output_hidden_states , _UpperCamelCase )
check_decoder_attentions_output(_UpperCamelCase )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = model_class(_UpperCamelCase )
SCREAMING_SNAKE_CASE = model(self._prepare_for_class(_UpperCamelCase , _UpperCamelCase ) )
self.assertEqual(config.output_hidden_states , _UpperCamelCase )
check_encoder_attentions_output(_UpperCamelCase )
# Check attention is always last and order is fine
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = model_class(_UpperCamelCase )
SCREAMING_SNAKE_CASE = model(self._prepare_for_class(_UpperCamelCase , _UpperCamelCase ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(_UpperCamelCase ) )
self.assertEqual(model.config.output_hidden_states , _UpperCamelCase )
check_encoder_attentions_output(_UpperCamelCase )
@unittest.skip("LED keeps using potentially symbolic tensors in conditionals and breaks tracing." )
def __snake_case( self : Optional[Any] ) -> Tuple:
'''simple docstring'''
pass
def __snake_case( self : str ) -> str:
'''simple docstring'''
pass
def __lowerCamelCase (UpperCAmelCase__ : Optional[int] ):
    return tf.constant(UpperCAmelCase__ , dtype=tf.int32 )
_lowerCamelCase : str = 1e-4
@slow
@require_tf
class lowercase ( unittest.TestCase ):
def __snake_case( self : int ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384" ).led
# change to intended input here
SCREAMING_SNAKE_CASE = _long_tensor([512 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
SCREAMING_SNAKE_CASE = _long_tensor([128 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
SCREAMING_SNAKE_CASE = prepare_led_inputs_dict(model.config , _UpperCamelCase , _UpperCamelCase )
SCREAMING_SNAKE_CASE = model(**_UpperCamelCase )[0]
SCREAMING_SNAKE_CASE = (1, 1_024, 768)
self.assertEqual(output.shape , _UpperCamelCase )
# change to expected output here
SCREAMING_SNAKE_CASE = tf.convert_to_tensor(
[[2.3_0_5_0, 2.8_2_7_9, 0.6_5_3_1], [-1.8_4_5_7, -0.1_4_5_5, -3.5_6_6_1], [-1.0_1_8_6, 0.4_5_8_6, -2.2_0_4_3]] , )
tf.debugging.assert_near(output[:, :3, :3] , _UpperCamelCase , atol=1e-3 )
def __snake_case( self : Any ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384" )
# change to intended input here
SCREAMING_SNAKE_CASE = _long_tensor([512 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
SCREAMING_SNAKE_CASE = _long_tensor([128 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
SCREAMING_SNAKE_CASE = prepare_led_inputs_dict(model.config , _UpperCamelCase , _UpperCamelCase )
SCREAMING_SNAKE_CASE = model(**_UpperCamelCase )[0]
SCREAMING_SNAKE_CASE = (1, 1_024, model.config.vocab_size)
self.assertEqual(output.shape , _UpperCamelCase )
# change to expected output here
SCREAMING_SNAKE_CASE = tf.convert_to_tensor(
[[3_3.6_5_0_7, 6.4_5_7_2, 1_6.8_0_8_9], [5.8_7_3_9, -2.4_2_3_8, 1_1.2_9_0_2], [-3.2_1_3_9, -4.3_1_4_9, 4.2_7_8_3]] , )
tf.debugging.assert_near(output[:, :3, :3] , _UpperCamelCase , atol=1e-3 , rtol=1e-3 )
| 206 |
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class lowercase ( a ):
lowercase__ : Dict = (UniPCMultistepScheduler,)
lowercase__ : Optional[int] = (("""num_inference_steps""", 25),)
def __snake_case( self : List[str] , **_UpperCamelCase : Tuple ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = {
"num_train_timesteps": 1_000,
"beta_start": 0.0_0_0_1,
"beta_end": 0.0_2,
"beta_schedule": "linear",
"solver_order": 2,
"solver_type": "bh2",
}
config.update(**_UpperCamelCase )
return config
def __snake_case( self : List[str] , _UpperCamelCase : Dict=0 , **_UpperCamelCase : List[str] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE = dict(self.forward_default_kwargs )
SCREAMING_SNAKE_CASE = kwargs.pop("num_inference_steps" , _UpperCamelCase )
SCREAMING_SNAKE_CASE = self.dummy_sample
SCREAMING_SNAKE_CASE = 0.1 * sample
SCREAMING_SNAKE_CASE = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
for scheduler_class in self.scheduler_classes:
SCREAMING_SNAKE_CASE = self.get_scheduler_config(**_UpperCamelCase )
SCREAMING_SNAKE_CASE = scheduler_class(**_UpperCamelCase )
scheduler.set_timesteps(_UpperCamelCase )
# copy over dummy past residuals
SCREAMING_SNAKE_CASE = dummy_past_residuals[: scheduler.config.solver_order]
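            # UniPC is a multistep solver that reuses the last `solver_order`
            # model outputs, so a reloaded scheduler must be primed with the
            # same residual history before stepping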
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_UpperCamelCase )
SCREAMING_SNAKE_CASE = scheduler_class.from_pretrained(_UpperCamelCase )
new_scheduler.set_timesteps(_UpperCamelCase )
# copy over dummy past residuals
SCREAMING_SNAKE_CASE = dummy_past_residuals[: new_scheduler.config.solver_order]
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = sample, sample
for t in range(_UpperCamelCase , time_step + scheduler.config.solver_order + 1 ):
SCREAMING_SNAKE_CASE = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , **_UpperCamelCase ).prev_sample
SCREAMING_SNAKE_CASE = new_scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , **_UpperCamelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def __snake_case( self : Any , _UpperCamelCase : Union[str, Any]=0 , **_UpperCamelCase : Dict ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = dict(self.forward_default_kwargs )
SCREAMING_SNAKE_CASE = kwargs.pop("num_inference_steps" , _UpperCamelCase )
SCREAMING_SNAKE_CASE = self.dummy_sample
SCREAMING_SNAKE_CASE = 0.1 * sample
SCREAMING_SNAKE_CASE = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
for scheduler_class in self.scheduler_classes:
SCREAMING_SNAKE_CASE = self.get_scheduler_config()
SCREAMING_SNAKE_CASE = scheduler_class(**_UpperCamelCase )
scheduler.set_timesteps(_UpperCamelCase )
# copy over dummy past residuals (must be after setting timesteps)
SCREAMING_SNAKE_CASE = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_UpperCamelCase )
SCREAMING_SNAKE_CASE = scheduler_class.from_pretrained(_UpperCamelCase )
# copy over dummy past residuals
new_scheduler.set_timesteps(_UpperCamelCase )
# copy over dummy past residual (must be after setting timesteps)
SCREAMING_SNAKE_CASE = dummy_past_residuals[: new_scheduler.config.solver_order]
SCREAMING_SNAKE_CASE = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , **_UpperCamelCase ).prev_sample
SCREAMING_SNAKE_CASE = new_scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , **_UpperCamelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def __snake_case( self : List[str] , _UpperCamelCase : Tuple=None , **_UpperCamelCase : List[Any] ) -> str:
'''simple docstring'''
if scheduler is None:
SCREAMING_SNAKE_CASE = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE = self.get_scheduler_config(**_UpperCamelCase )
SCREAMING_SNAKE_CASE = scheduler_class(**_UpperCamelCase )
SCREAMING_SNAKE_CASE = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE = self.get_scheduler_config(**_UpperCamelCase )
SCREAMING_SNAKE_CASE = scheduler_class(**_UpperCamelCase )
SCREAMING_SNAKE_CASE = 10
SCREAMING_SNAKE_CASE = self.dummy_model()
SCREAMING_SNAKE_CASE = self.dummy_sample_deter
scheduler.set_timesteps(_UpperCamelCase )
for i, t in enumerate(scheduler.timesteps ):
SCREAMING_SNAKE_CASE = model(_UpperCamelCase , _UpperCamelCase )
SCREAMING_SNAKE_CASE = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ).prev_sample
return sample
def __snake_case( self : int ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = dict(self.forward_default_kwargs )
SCREAMING_SNAKE_CASE = kwargs.pop("num_inference_steps" , _UpperCamelCase )
for scheduler_class in self.scheduler_classes:
SCREAMING_SNAKE_CASE = self.get_scheduler_config()
SCREAMING_SNAKE_CASE = scheduler_class(**_UpperCamelCase )
SCREAMING_SNAKE_CASE = self.dummy_sample
SCREAMING_SNAKE_CASE = 0.1 * sample
if num_inference_steps is not None and hasattr(_UpperCamelCase , "set_timesteps" ):
scheduler.set_timesteps(_UpperCamelCase )
elif num_inference_steps is not None and not hasattr(_UpperCamelCase , "set_timesteps" ):
SCREAMING_SNAKE_CASE = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
SCREAMING_SNAKE_CASE = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
SCREAMING_SNAKE_CASE = dummy_past_residuals[: scheduler.config.solver_order]
SCREAMING_SNAKE_CASE = scheduler.timesteps[5]
SCREAMING_SNAKE_CASE = scheduler.timesteps[6]
SCREAMING_SNAKE_CASE = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , **_UpperCamelCase ).prev_sample
SCREAMING_SNAKE_CASE = scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , **_UpperCamelCase ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def __snake_case( self : str ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE = UniPCMultistepScheduler(**self.get_scheduler_config() )
SCREAMING_SNAKE_CASE = self.full_loop(scheduler=_UpperCamelCase )
SCREAMING_SNAKE_CASE = torch.mean(torch.abs(_UpperCamelCase ) )
assert abs(result_mean.item() - 0.2_4_6_4 ) < 1e-3
SCREAMING_SNAKE_CASE = DPMSolverSinglestepScheduler.from_config(scheduler.config )
SCREAMING_SNAKE_CASE = DEISMultistepScheduler.from_config(scheduler.config )
SCREAMING_SNAKE_CASE = DPMSolverMultistepScheduler.from_config(scheduler.config )
SCREAMING_SNAKE_CASE = UniPCMultistepScheduler.from_config(scheduler.config )
SCREAMING_SNAKE_CASE = self.full_loop(scheduler=_UpperCamelCase )
SCREAMING_SNAKE_CASE = torch.mean(torch.abs(_UpperCamelCase ) )
assert abs(result_mean.item() - 0.2_4_6_4 ) < 1e-3
def __snake_case( self : Optional[int] ) -> List[Any]:
'''simple docstring'''
for timesteps in [25, 50, 100, 999, 1_000]:
self.check_over_configs(num_train_timesteps=_UpperCamelCase )
def __snake_case( self : Tuple ) -> Union[str, Any]:
'''simple docstring'''
self.check_over_configs(thresholding=_UpperCamelCase )
for order in [1, 2, 3]:
for solver_type in ["bh1", "bh2"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=_UpperCamelCase , prediction_type=_UpperCamelCase , sample_max_value=_UpperCamelCase , solver_order=_UpperCamelCase , solver_type=_UpperCamelCase , )
def __snake_case( self : Tuple ) -> int:
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_UpperCamelCase )
    def test_solver_order_and_type(self):
        for solver_type in ["bh1", "bh2"]:
            for order in [1, 2, 3]:
                for prediction_type in ["epsilon", "sample"]:
                    self.check_over_configs(
                        solver_order=order,
                        solver_type=solver_type,
                        prediction_type=prediction_type,
                    )
                    sample = self.full_loop(
                        solver_order=order,
                        solver_type=solver_type,
                        prediction_type=prediction_type,
                    )
                    assert not torch.isnan(sample).any(), "Samples have nan numbers"
    def test_lower_order_final(self):
        self.check_over_configs(lower_order_final=True)
        self.check_over_configs(lower_order_final=False)

    def test_inference_steps(self):
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0)
    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2464) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.1014) < 1e-3
    def test_fp16_support(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        assert sample.dtype == torch.float16
    def test_unique_timesteps(self, **config):
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

            scheduler.set_timesteps(scheduler.config.num_train_timesteps)
            assert len(scheduler.timesteps.unique()) == scheduler.num_inference_steps
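# Illustrative sketch (added; not part of the original test file) of the scheduler
# API the tests above exercise. The import and config values are assumed-but-typical
# diffusers usage, not taken from this file:
#
#   from diffusers import UniPCMultistepScheduler
#   scheduler = UniPCMultistepScheduler(num_train_timesteps=1000)
#   scheduler.set_timesteps(10)  # 10 denoising steps
#   # inside a denoising loop, each step consumes (model_output, timestep, sample):
#   # sample = scheduler.step(residual, t, sample).prev_sample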
| 206 | 1 |
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class ReturnType(enum.Enum):
    TENSORS = 0
    NEW_TEXT = 1
    FULL_TEXT = 2


@add_end_docstrings(PIPELINE_INIT_ARGS)
class TextGenerationPipeline(Pipeline):
    XL_PREFIX = '\n In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The\n voice of Nicholas\'s young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western\n Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision\n and denounces one of the men as a horse thief. Although his father initially slaps him for making such an\n accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of\n the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,\n begging for his blessing. <eod> </s> <eos>\n '
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(
            TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == "tf" else MODEL_FOR_CAUSAL_LM_MAPPING
        )
        if "prefix" not in self._preprocess_params:
            # This is very specific. The logic is quite complex and needs to be done
            # as a "default".
            # It also defines both some preprocess_kwargs and generate_kwargs
            # which is why we cannot put them in their respective methods.
            prefix = None
            if self.model.config.prefix is not None:
                prefix = self.model.config.prefix
            if prefix is None and self.model.__class__.__name__ in [
                "XLNetLMHeadModel",
                "TransfoXLLMHeadModel",
                "TFXLNetLMHeadModel",
                "TFTransfoXLLMHeadModel",
            ]:
                # For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
                prefix = self.XL_PREFIX
            if prefix is not None:
                # Recalculate some generate_kwargs linked to prefix.
                preprocess_params, forward_params, _ = self._sanitize_parameters(prefix=prefix, **self._forward_params)
                self._preprocess_params = {**self._preprocess_params, **preprocess_params}
                self._forward_params = {**self._forward_params, **forward_params}
    def _sanitize_parameters(
        self,
        return_full_text=None,
        return_tensors=None,
        return_text=None,
        return_type=None,
        clean_up_tokenization_spaces=None,
        prefix=None,
        handle_long_generation=None,
        stop_sequence=None,
        **generate_kwargs,
    ):
        preprocess_params = {}
        if prefix is not None:
            preprocess_params["prefix"] = prefix
        if prefix:
            prefix_inputs = self.tokenizer(
                prefix, padding=False, add_special_tokens=False, return_tensors=self.framework
            )
            generate_kwargs["prefix_length"] = prefix_inputs["input_ids"].shape[-1]

        if handle_long_generation is not None:
            if handle_long_generation not in {"hole"}:
                raise ValueError(
                    f"{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected"
                    " [None, 'hole']"
                )
            preprocess_params["handle_long_generation"] = handle_long_generation

        preprocess_params.update(generate_kwargs)
        forward_params = generate_kwargs

        postprocess_params = {}
        if return_full_text is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_full_text`")
            if return_tensors is not None:
                raise ValueError("`return_full_text` is mutually exclusive with `return_tensors`")
            return_type = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
        if return_tensors is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_tensors`")
            return_type = ReturnType.TENSORS
        if return_type is not None:
            postprocess_params["return_type"] = return_type
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False)
            if len(stop_sequence_ids) > 1:
                warnings.warn(
                    "Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
                    " the stop sequence will be used as the stop sequence string in the interim."
                )
            generate_kwargs["eos_token_id"] = stop_sequence_ids[0]

        return preprocess_params, forward_params, postprocess_params
    def _parse_and_tokenize(self, *args, **kwargs):
        # Parse arguments
        if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
            kwargs.update({"add_space_before_punct_symbol": True})

        return super()._parse_and_tokenize(*args, **kwargs)

    def __call__(self, text_inputs, **kwargs):
        return super().__call__(text_inputs, **kwargs)
    def preprocess(self, prompt_text, prefix="", handle_long_generation=None, **generate_kwargs):
        inputs = self.tokenizer(
            prefix + prompt_text, padding=False, add_special_tokens=False, return_tensors=self.framework
        )
        inputs["prompt_text"] = prompt_text

        if handle_long_generation == "hole":
            cur_len = inputs["input_ids"].shape[-1]
            if "max_new_tokens" in generate_kwargs:
                new_tokens = generate_kwargs["max_new_tokens"]
            else:
                new_tokens = generate_kwargs.get("max_length", self.model.config.max_length) - cur_len
                if new_tokens < 0:
                    raise ValueError("We cannot infer how many new tokens are expected")
            if cur_len + new_tokens > self.tokenizer.model_max_length:
                keep_length = self.tokenizer.model_max_length - new_tokens
                if keep_length <= 0:
                    raise ValueError(
                        "We cannot use `hole` to handle this generation the number of desired tokens exceeds the"
                        " models max length"
                    )

                inputs["input_ids"] = inputs["input_ids"][:, -keep_length:]
                if "attention_mask" in inputs:
                    inputs["attention_mask"] = inputs["attention_mask"][:, -keep_length:]

        return inputs
    def _forward(self, model_inputs, **generate_kwargs):
        input_ids = model_inputs["input_ids"]
        attention_mask = model_inputs.get("attention_mask", None)
        # Allow empty prompts
        if input_ids.shape[1] == 0:
            input_ids = None
            attention_mask = None
            in_b = 1
        else:
            in_b = input_ids.shape[0]
        prompt_text = model_inputs.pop("prompt_text")

        # If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
        # generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
        prefix_length = generate_kwargs.pop("prefix_length", 0)
        if prefix_length > 0:
            has_max_new_tokens = "max_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].max_new_tokens is not None
            )
            if not has_max_new_tokens:
                generate_kwargs["max_length"] = generate_kwargs.get("max_length") or self.model.config.max_length
                generate_kwargs["max_length"] += prefix_length
            has_min_new_tokens = "min_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].min_new_tokens is not None
            )
            if not has_min_new_tokens and "min_length" in generate_kwargs:
                generate_kwargs["min_length"] += prefix_length

        # BS x SL
        generated_sequence = self.model.generate(input_ids=input_ids, attention_mask=attention_mask, **generate_kwargs)
        out_b = generated_sequence.shape[0]
        if self.framework == "pt":
            generated_sequence = generated_sequence.reshape(in_b, out_b // in_b, *generated_sequence.shape[1:])
        elif self.framework == "tf":
            generated_sequence = tf.reshape(generated_sequence, (in_b, out_b // in_b, *generated_sequence.shape[1:]))
        return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
    def postprocess(self, model_outputs, return_type=ReturnType.FULL_TEXT, clean_up_tokenization_spaces=True):
        generated_sequence = model_outputs["generated_sequence"][0]
        input_ids = model_outputs["input_ids"]
        prompt_text = model_outputs["prompt_text"]
        generated_sequence = generated_sequence.numpy().tolist()
        records = []
        for sequence in generated_sequence:
            if return_type == ReturnType.TENSORS:
                record = {"generated_token_ids": sequence}
            elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
                # Decode text
                text = self.tokenizer.decode(
                    sequence,
                    skip_special_tokens=True,
                    clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                )

                # Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
                if input_ids is None:
                    prompt_length = 0
                else:
                    prompt_length = len(
                        self.tokenizer.decode(
                            input_ids[0],
                            skip_special_tokens=True,
                            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                        )
                    )

                if return_type == ReturnType.FULL_TEXT:
                    all_text = prompt_text + text[prompt_length:]
                else:
                    all_text = text[prompt_length:]

                record = {"generated_text": all_text}
            records.append(record)

        return records
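# Illustrative usage (added; not from the original file). This class backs the
# standard `pipeline("text-generation")` factory; the checkpoint name below is
# just an example:
#
#   from transformers import pipeline
#   generator = pipeline("text-generation", model="gpt2")
#   print(generator("Once upon a time", max_new_tokens=20)[0]["generated_text"])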
| 80 |
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"
    },
    "merges_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"
    },
    "tokenizer_config_file": {
        "facebook/blenderbot_small-90M": (
            "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"
        )
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/blenderbot_small-90M": 512,
}
class BlenderbotSmallTokenizerFast(PreTrainedTokenizerFast):
    """Fast BlenderbotSmall tokenizer (backed by HuggingFace's *tokenizers* library)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BlenderbotSmallTokenizer
    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            ByteLevelBPETokenizer(
                vocab=vocab_file,
                merges=merges_file,
                add_prefix_space=add_prefix_space,
                trim_offsets=trim_offsets,
            ),
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            **kwargs,
        )
        self.add_prefix_space = add_prefix_space
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
 | 97 | 0 |
"""simple docstring"""
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)


Prediction = Dict[str, Any]
Predictions = List[Prediction]


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ObjectDetectionPipeline(Pipeline):
    """
    Object detection pipeline using any `AutoModelForObjectDetection`. This pipeline predicts bounding boxes of
    objects and their classes.
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        if self.framework == "tf":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        requires_backends(self, "vision")
        self.check_model_type(
            dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items())
        )

    def _sanitize_parameters(self, **kwargs):
        postprocess_kwargs = {}
        if "threshold" in kwargs:
            postprocess_kwargs["threshold"] = kwargs["threshold"]
        return {}, {}, postprocess_kwargs

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
    def preprocess(self, image):
        image = load_image(image)
        target_size = torch.IntTensor([[image.height, image.width]])
        inputs = self.image_processor(images=[image], return_tensors="pt")
        if self.tokenizer is not None:
            inputs = self.tokenizer(text=inputs["words"], boxes=inputs["boxes"], return_tensors="pt")
        inputs["target_size"] = target_size
        return inputs
    def _forward(self, model_inputs):
        target_size = model_inputs.pop("target_size")
        outputs = self.model(**model_inputs)
        model_outputs = outputs.__class__({"target_size": target_size, **outputs})
        if self.tokenizer is not None:
            model_outputs["bbox"] = model_inputs["bbox"]
        return model_outputs
    def postprocess(self, model_outputs, threshold=0.9):
        target_size = model_outputs["target_size"]
        if self.tokenizer is not None:
            # This is a LayoutLMForTokenClassification variant.
            # The OCR got the boxes and the model classified the words.
            height, width = target_size[0].tolist()

            def unnormalize(bbox):
                return self._get_bounding_box(
                    torch.Tensor(
                        [
                            (width * bbox[0] / 1000),
                            (height * bbox[1] / 1000),
                            (width * bbox[2] / 1000),
                            (height * bbox[3] / 1000),
                        ]
                    )
                )

            scores, classes = model_outputs["logits"].squeeze(0).softmax(dim=-1).max(dim=-1)
            labels = [self.model.config.id2label[prediction] for prediction in classes.tolist()]
            boxes = [unnormalize(bbox) for bbox in model_outputs["bbox"].squeeze(0)]
            keys = ["score", "label", "box"]
            annotation = [dict(zip(keys, vals)) for vals in zip(scores.tolist(), labels, boxes) if vals[0] > threshold]
        else:
            # This is a regular ForObjectDetectionModel
            raw_annotations = self.image_processor.post_process_object_detection(model_outputs, threshold, target_size)
            raw_annotation = raw_annotations[0]
            scores = raw_annotation["scores"]
            labels = raw_annotation["labels"]
            boxes = raw_annotation["boxes"]

            raw_annotation["scores"] = scores.tolist()
            raw_annotation["labels"] = [self.model.config.id2label[label.item()] for label in labels]
            raw_annotation["boxes"] = [self._get_bounding_box(box) for box in boxes]

            # {"scores": [...], ...} --> [{"score":x, ...}, ...]
            keys = ["score", "label", "box"]
            annotation = [
                dict(zip(keys, vals))
                for vals in zip(raw_annotation["scores"], raw_annotation["labels"], raw_annotation["boxes"])
            ]

        return annotation
    def _get_bounding_box(self, box: "torch.Tensor") -> Dict[str, int]:
        if self.framework != "pt":
            raise ValueError("The ObjectDetectionPipeline is only available in PyTorch.")
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            "xmin": xmin,
            "ymin": ymin,
            "xmax": xmax,
            "ymax": ymax,
        }
        return bbox
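# Illustrative usage (added; not from the original file; the checkpoint name is
# just an example):
#
#   from transformers import pipeline
#   detector = pipeline("object-detection", model="facebook/detr-resnet-50")
#   detector("cats.png", threshold=0.9)
#   # -> [{"score": 0.99, "label": "cat", "box": {"xmin": ..., "ymin": ..., "xmax": ..., "ymax": ...}}, ...]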
| 161 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class ImageQuestionAnsweringTool(PipelineTool):
    default_checkpoint = "dandelin/vilt-b32-finetuned-vqa"
    description = (
        "This is a tool that answers a question about an image. It takes an input named `image` which should be the "
        "image containing the information, as well as a `question` which should be the question in English. It "
        "returns a text that is the answer to the question."
    )
    name = "image_qa"
    pre_processor_class = AutoProcessor
    model_class = AutoModelForVisualQuestionAnswering

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image", question: str):
        return self.pre_processor(image, question, return_tensors="pt")

    def forward(self, inputs):
        with torch.no_grad():
            return self.model(**inputs).logits

    def decode(self, outputs):
        idx = outputs.argmax(-1).item()
        return self.model.config.id2label[idx]
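# Illustrative usage (added; not from the original file). Exact input handling
# (file paths vs. PIL images) is delegated to `PipelineTool`, so treat this as a
# sketch rather than the definitive call signature:
#
#   tool = ImageQuestionAnsweringTool()
#   answer = tool(image="cat.png", question="What animal is this?")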
| 161 | 1 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_mgp_str": ["MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP", "MgpstrConfig"],
    "processing_mgp_str": ["MgpstrProcessor"],
    "tokenization_mgp_str": ["MgpstrTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mgp_str"] = [
        "MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MgpstrModel",
        "MgpstrPreTrainedModel",
        "MgpstrForSceneTextRecognition",
    ]
if TYPE_CHECKING:
from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
from .processing_mgp_str import MgpstrProcessor
from .tokenization_mgp_str import MgpstrTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mgp_str import (
MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
MgpstrForSceneTextRecognition,
MgpstrModel,
MgpstrPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
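# Note (added for clarity; not in the original file): this is the standard
# transformers lazy-import pattern. At runtime the module object is replaced by a
# `_LazyModule`, so e.g. `from transformers.models.mgp_str import MgpstrConfig`
# only imports the heavy submodules when an attribute is first accessed.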
| 125 |
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.17.0.dev0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    dataset_name: Optional[str] = field(
        default="tab_fact", metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default="tab_fact",
        metadata={"help": "The configuration name of the dataset to use (via the datasets library)."},
    )
    max_seq_length: int = field(
        default=1024,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_predict_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of prediction examples to this "
                "value if set."
            )
        },
    )
    train_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the training data."}
    )
    validation_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the validation data."}
    )
    test_file: Optional[str] = field(default=None, metadata={"help": "A csv or a json file containing the test data."})

    def __post_init__(self):
        if self.dataset_name is not None:
            pass
        elif self.train_file is None or self.validation_file is None:
            raise ValueError("Need either a GLUE task, a training/validation file or a dataset name.")
        else:
            train_extension = self.train_file.split(".")[-1]
            assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
            validation_extension = self.validation_file.split(".")[-1]
            assert (
                validation_extension == train_extension
            ), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        default=None, metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.

    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Set seed before initializing model.
    set_seed(training_args.seed)
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
# For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
    if data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        raw_datasets = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir
        )
    else:
        # Loading a dataset from your local files.
        # CSV/JSON training and evaluation files are needed.
        data_files = {"train": data_args.train_file, "validation": data_args.validation_file}

        # Get the test dataset: you can provide your own CSV/JSON test file (see below)
        # when you use `do_predict` without specifying a GLUE benchmark task.
        if training_args.do_predict:
            if data_args.test_file is not None:
                train_extension = data_args.train_file.split(".")[-1]
                test_extension = data_args.test_file.split(".")[-1]
                assert (
                    test_extension == train_extension
                ), "`test_file` should have the same extension (csv or json) as `train_file`."
                data_files["test"] = data_args.test_file
            else:
                raise ValueError("Need either a GLUE task or a test file for `do_predict`.")

        for key in data_files.keys():
            logger.info(f"load a local file for {key}: {data_files[key]}")

        if data_args.train_file.endswith(".csv"):
            # Loading a dataset from local csv files
            raw_datasets = load_dataset("csv", data_files=data_files, cache_dir=model_args.cache_dir)
        else:
            # Loading a dataset from local json files
            raw_datasets = load_dataset("json", data_files=data_files, cache_dir=model_args.cache_dir)
    # See more about loading any type of standard or custom dataset at
    # https://huggingface.co/docs/datasets/loading_datasets.html.
# Labels
    label_list = raw_datasets["train"].features["label"].names
    num_labels = len(label_list)
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    # load tapex tokenizer
    tokenizer = TapexTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast_tokenizer,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        add_prefix_space=True,
    )
    model = BartForSequenceClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
# Padding strategy
    if data_args.pad_to_max_length:
        padding = "max_length"
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False

    # Some models have set the order of the labels to use, so let's make sure we do use it.
    model.config.label2id = {"Refused": 0, "Entailed": 1}
    model.config.id2label = {0: "Refused", 1: "Entailed"}

    if data_args.max_seq_length > tokenizer.model_max_length:
        logger.warning(
            f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the "
            f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
        )
    max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)
    def preprocess_tabfact_function(examples):
        # Tokenize the texts
        def _convert_table_text_to_pandas(_table_text):
            _table_content = [_table_row.split("#") for _table_row in _table_text.strip("\n").split("\n")]
            _table_pd = pd.DataFrame.from_records(_table_content[1:], columns=_table_content[0])
            return _table_pd

        questions = examples["statement"]
        tables = list(map(_convert_table_text_to_pandas, examples["table_text"]))
        result = tokenizer(tables, questions, padding=padding, max_length=max_seq_length, truncation=True)

        result["label"] = examples["label"]
        return result

    with training_args.main_process_first(desc="dataset map pre-processing"):
        raw_datasets = raw_datasets.map(
            preprocess_tabfact_function,
            batched=True,
            load_from_cache_file=not data_args.overwrite_cache,
            desc="Running tokenizer on dataset",
        )
    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError("--do_train requires a train dataset")
        train_dataset = raw_datasets["train"]
        if data_args.max_train_samples is not None:
            train_dataset = train_dataset.select(range(data_args.max_train_samples))

    if training_args.do_eval:
        if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
            raise ValueError("--do_eval requires a validation dataset")
        eval_dataset = raw_datasets["validation"]
        if data_args.max_eval_samples is not None:
            eval_dataset = eval_dataset.select(range(data_args.max_eval_samples))

    if training_args.do_predict or data_args.test_file is not None:
        if "test" not in raw_datasets and "test_matched" not in raw_datasets:
            raise ValueError("--do_predict requires a test dataset")
        predict_dataset = raw_datasets["test"]
        if data_args.max_predict_samples is not None:
            predict_dataset = predict_dataset.select(range(data_args.max_predict_samples))

    # Log a few random samples from the training set:
    if training_args.do_train:
        for index in random.sample(range(len(train_dataset)), 3):
            logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p: EvalPrediction):
        preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
        preds = np.argmax(preds, axis=1)
        return {"accuracy": (preds == p.label_ids).astype(np.float32).mean().item()}

    # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
        data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
    else:
        data_collator = None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=tokenizer,
        data_collator=data_collator,
    )
# Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.save_model()  # Saves the tokenizer too for easy upload

        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        metrics = trainer.evaluate(eval_dataset=eval_dataset)
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)
    if training_args.do_predict:
        logger.info("*** Predict ***")

        # Removing the `label` columns because it contains -1 and Trainer won't like that.
        predict_dataset = predict_dataset.remove_columns("label")
        predictions = trainer.predict(predict_dataset, metric_key_prefix="predict").predictions
        predictions = np.argmax(predictions, axis=1)

        output_predict_file = os.path.join(training_args.output_dir, "predict_results_tabfact.txt")
        if trainer.is_world_process_zero():
            with open(output_predict_file, "w") as writer:
                logger.info("***** Predict Results *****")
                writer.write("index\tprediction\n")
                for index, item in enumerate(predictions):
                    item = label_list[item]
                    writer.write(f"{index}\t{item}\n")

    kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "text-classification"}

    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
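# Example invocation (added for illustration; the script name, checkpoint and
# hyper-parameters below are assumptions, not taken from this file):
#
#   python run_tabfact_with_tapex.py \
#     --model_name_or_path microsoft/tapex-base \
#     --do_train --do_eval \
#     --max_seq_length 1024 \
#     --output_dir ./tapex-tabfact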
| 125 | 1 |
import unittest
import torch
from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class VQModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = VQModel
    main_input_name = "sample"

    @property
    def dummy_input(self, sizes=(32, 32)):
        batch_size = 4
        num_channels = 3

        image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)

        return {"sample": image}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 3,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_forward_signature(self):
        pass

    def test_training(self):
        pass

    def test_from_pretrained_hub(self):
        model, loading_info = VQModel.from_pretrained("fusing/vqgan-dummy", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        image = model(**self.dummy_input)

        assert image is not None, "Make sure output is not None"

    def test_output_pretrained(self):
        model = VQModel.from_pretrained("fusing/vqgan-dummy")
        model.to(torch_device).eval()

        torch.manual_seed(0)
        if torch.cuda.is_available():
            torch.cuda.manual_seed_all(0)

        image = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        image = image.to(torch_device)
        with torch.no_grad():
            output = model(image).sample

        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-0.0153, -0.4044, -0.1880, -0.5161, -0.2418, -0.4072, -0.1612, -0.0633, -0.0143])
        # fmt: on
        self.assertTrue(torch.allclose(output_slice, expected_output_slice, atol=1e-3))
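# Note (added; not in the original file): `enable_full_determinism()` at import
# time pins random seeds and cuDNN behavior so the hard-coded expected slice above
# is reproducible across runs; the suite is typically driven via pytest.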
| 147 |
import unittest
from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
)
from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
class DebertaModelTester(object):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        relative_attention=False,
        position_biased_input=True,
        pos_att_type="None",
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return DebertaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            relative_attention=self.relative_attention,
            position_biased_input=self.position_biased_input,
            pos_att_type=self.pos_att_type,
        )

    def get_pipeline_config(self):
        config = self.get_config()
        config.vocab_size = 300
        return config

    def check_loss_output(self, result):
        self.parent.assertListEqual(list(result.loss.size()), [])
    def create_and_check_deberta_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaModel(config=config)
        model.to(torch_device)
        model.eval()
        sequence_output = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids)[0]

        self.parent.assertListEqual(list(sequence_output.size()), [self.batch_size, self.seq_length, self.hidden_size])

    def create_and_check_deberta_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_deberta_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertListEqual(list(result.logits.size()), [self.batch_size, self.num_labels])
        self.check_loss_output(result)

    def create_and_check_deberta_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_deberta_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class DebertaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DebertaModel,
            DebertaForMaskedLM,
            DebertaForSequenceClassification,
            DebertaForTokenClassification,
            DebertaForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DebertaModel,
            "fill-mask": DebertaForMaskedLM,
            "question-answering": DebertaForQuestionAnswering,
            "text-classification": DebertaForSequenceClassification,
            "token-classification": DebertaForTokenClassification,
            "zero-shot": DebertaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    is_encoder_decoder = False

    def setUp(self):
        self.model_tester = DebertaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_deberta_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DebertaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
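# Illustrative quick-start for the same checkpoint the integration test below uses
# (added; not part of the original file):
#
#   from transformers import AutoTokenizer, DebertaModel
#   tokenizer = AutoTokenizer.from_pretrained("microsoft/deberta-base")
#   model = DebertaModel.from_pretrained("microsoft/deberta-base")
#   outputs = model(**tokenizer("Hello world", return_tensors="pt"))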
@require_torch
@require_sentencepiece
@require_tokenizers
class DebertaModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = DebertaModel.from_pretrained("microsoft/deberta-base")

        input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[-0.5986, -0.8055, -0.8462], [1.4484, -0.9348, -0.8059], [0.3123, 0.0032, -1.4131]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4), f"{output[:, 1:4, 1:4]}")
 | 147 | 1 |
import qiskit


def single_qubit_measure(qubits: int, classical_bits: int) -> qiskit.result.counts.Counts:
    simulator = qiskit.Aer.get_backend("aer_simulator")

    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)

    # Apply X (NOT) Gate to Qubits 0 & 1
    circuit.x(0)
    circuit.x(1)

    # Map the quantum measurement to the classical bits
    circuit.measure([0, 1], [0, 1])

    # Execute the circuit on the qasm simulator
    job = qiskit.execute(circuit, simulator, shots=1000)

    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)
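# Expected behavior (note added for clarity; not in the original file): both qubits
# are flipped from |0> to |1> before measurement, so all 1000 shots should land in
# the '11' state, i.e. counts == {'11': 1000}.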
if __name__ == "__main__":
    counts = single_qubit_measure(2, 2)
    print(f"Total count for various states are: {counts}")
 | 189 |
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class DummyBeamDataset(datasets.BeamBasedBuilder):
    """Dummy Beam dataset."""

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"content": datasets.Value("string")}),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_dummy_examples()})]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)


class NestedBeamDataset(datasets.BeamBasedBuilder):
    """Dummy Beam dataset with nested features."""

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})}),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_nested_examples()})
        ]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)
def get_test_dummy_examples():
    return [(i, {"content": content}) for i, content in enumerate(["foo", "bar", "foobar"])]


def get_test_nested_examples():
    return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["foo", "bar", "foobar"])]
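# Note (added; not in the original file): the tests below run these builders with
# Beam's in-process "DirectRunner", so no external Beam cluster is required.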
class BeamBuilderTest(TestCase):
    @require_beam
    def test_download_and_prepare(self):
        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")
                )
            )
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_dummy_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_dummy_examples()[expected_num_examples - 1][1]
            )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset
    @require_beam
    def test_download_and_prepare_sharded(self):
        import apache_beam as beam

        original_write_to_parquet = beam.io.parquetio.WriteToParquet

        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            with patch("apache_beam.io.parquetio.WriteToParquet") as write_parquet_mock:
                write_parquet_mock.side_effect = partial(original_write_to_parquet, num_shards=2)
                builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00000-of-00002.arrow"
                    )
                )
            )
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00001-of-00002.arrow"
                    )
                )
            )
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            # Order is not preserved when sharding, so we just check that all the elements are there
            self.assertListEqual(sorted(dset["train"]["content"]), sorted(["foo", "bar", "foobar"]))
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset
    @require_beam
    def test_no_beam_options(self):
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir)
            self.assertRaises(datasets.builder.MissingBeamOptions, builder.download_and_prepare)

    @require_beam
    def test_nested_features(self):
        expected_num_examples = len(get_test_nested_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = NestedBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")
                )
            )
            self.assertDictEqual(
                builder.info.features, datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})})
            )
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_nested_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_nested_examples()[expected_num_examples - 1][1]
            )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
del dset | 189 | 1 |
import inspect

import jax
import jax.lax as lax
import jax.numpy as jnp

from ..utils import add_start_docstrings
from ..utils.logging import get_logger


logger = get_logger(__name__)


LOGITS_PROCESSOR_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary.

            Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):
            Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam
            search or log softmax for each vocabulary token when using beam search
        kwargs (`Dict[str, Any]`, *optional*):
            Additional logits processor specific kwargs.

    Return:
        `jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.

"""


class FlaxLogitsProcessor:
    """Abstract base class for all logits processors that can be applied during generation."""

    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray) -> jnp.ndarray:
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )


class FlaxLogitsWarper:
    """Abstract base class for all logits warpers applied during generation with multinomial sampling."""

    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray) -> jnp.ndarray:
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )


class FlaxLogitsProcessorList(list):
    """List of processors/warpers, applied in order to the scores."""

    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int, **kwargs) -> jnp.ndarray:
        for processor in self:
            function_args = inspect.signature(processor.__call__).parameters
            if len(function_args) > 3:
                if not all(arg in kwargs for arg in list(function_args.keys())[2:]):
                    raise ValueError(
                        f"Make sure that all the required parameters: {list(function_args.keys())} for "
                        f"{processor.__class__} are passed to the logits processor."
                    )
                scores = processor(input_ids, scores, cur_len, **kwargs)
            else:
                scores = processor(input_ids, scores, cur_len)
        return scores


class FlaxTemperatureLogitsWarper(FlaxLogitsWarper):
    def __init__(self, temperature: float):
        if not isinstance(temperature, float) or not (temperature > 0):
            raise ValueError(f"`temperature` has to be a strictly positive float, but is {temperature}")
        self.temperature = temperature

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        scores = scores / self.temperature
        return scores


class FlaxTopPLogitsWarper(FlaxLogitsWarper):
    def __init__(self, top_p: float, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
        if not isinstance(top_p, float) or (top_p < 0 or top_p > 1.0):
            raise ValueError(f"`top_p` has to be a float > 0 and < 1, but is {top_p}")
        if not isinstance(min_tokens_to_keep, int) or (min_tokens_to_keep < 1):
            raise ValueError(f"`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}")

        self.top_p = top_p
        self.filter_value = filter_value
        self.min_tokens_to_keep = min_tokens_to_keep

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        topk_scores, topk_indices = lax.top_k(scores, scores.shape[-1])

        mask_scores = jnp.full_like(scores, self.filter_value)
        cumulative_probs = jax.nn.softmax(topk_scores, axis=-1).cumsum(axis=-1)
        score_mask = cumulative_probs < self.top_p

        # include the token that is higher than top_p as well
        score_mask = jnp.roll(score_mask, 1)
        score_mask |= score_mask.at[:, 0].set(True)

        # min tokens to keep
        score_mask = score_mask.at[:, : self.min_tokens_to_keep].set(True)

        topk_next_scores = jnp.where(score_mask, topk_scores, mask_scores)
        next_scores = jax.lax.sort_key_val(topk_indices, topk_next_scores)[-1]

        return next_scores


class FlaxTopKLogitsWarper(FlaxLogitsWarper):
    def __init__(self, top_k: int, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
        if not isinstance(top_k, int) or top_k <= 0:
            raise ValueError(f"`top_k` has to be a strictly positive integer, but is {top_k}")

        self.top_k = max(top_k, min_tokens_to_keep)
        self.filter_value = filter_value

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        batch_size, vocab_size = scores.shape
        next_scores_flat = jnp.full(batch_size * vocab_size, self.filter_value)

        topk = min(self.top_k, scores.shape[-1])  # Safety check
        topk_scores, topk_indices = lax.top_k(scores, topk)
        shift = jnp.broadcast_to((jnp.arange(batch_size) * vocab_size)[:, None], (batch_size, topk)).flatten()
        topk_scores_flat = topk_scores.flatten()
        topk_indices_flat = topk_indices.flatten() + shift

        next_scores_flat = next_scores_flat.at[topk_indices_flat].set(topk_scores_flat)
        next_scores = next_scores_flat.reshape(batch_size, vocab_size)
        return next_scores


class FlaxForcedBOSTokenLogitsProcessor(FlaxLogitsProcessor):
    def __init__(self, bos_token_id: int):
        self.bos_token_id = bos_token_id

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        new_scores = jnp.full(scores.shape, -float("inf"))

        apply_penalty = 1 - jnp.bool_(cur_len - 1)

        scores = jnp.where(apply_penalty, new_scores.at[:, self.bos_token_id].set(0), scores)

        return scores


class FlaxForcedEOSTokenLogitsProcessor(FlaxLogitsProcessor):
    def __init__(self, max_length: int, eos_token_id: int):
        self.max_length = max_length
        self.eos_token_id = eos_token_id

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        new_scores = jnp.full(scores.shape, -float("inf"))

        apply_penalty = 1 - jnp.bool_(cur_len - self.max_length + 1)

        scores = jnp.where(apply_penalty, new_scores.at[:, self.eos_token_id].set(0), scores)

        return scores


class FlaxMinLengthLogitsProcessor(FlaxLogitsProcessor):
    def __init__(self, min_length: int, eos_token_id: int):
        if not isinstance(min_length, int) or min_length < 0:
            raise ValueError(f"`min_length` has to be a positive integer, but is {min_length}")

        if not isinstance(eos_token_id, int) or eos_token_id < 0:
            raise ValueError(f"`eos_token_id` has to be a positive integer, but is {eos_token_id}")

        self.min_length = min_length
        self.eos_token_id = eos_token_id

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        # create boolean flag to decide if min length penalty should be applied
        apply_penalty = 1 - jnp.clip(cur_len - self.min_length, 0, 1)

        scores = jnp.where(apply_penalty, scores.at[:, self.eos_token_id].set(-float("inf")), scores)

        return scores


class FlaxSuppressTokensAtBeginLogitsProcessor(FlaxLogitsProcessor):
    def __init__(self, begin_suppress_tokens, begin_index):
        self.begin_suppress_tokens = list(begin_suppress_tokens)
        self.begin_index = begin_index

    def __call__(self, input_ids, scores, cur_len: int):
        apply_penalty = 1 - jnp.bool_(cur_len - self.begin_index)

        scores = jnp.where(apply_penalty, scores.at[:, self.begin_suppress_tokens].set(-float("inf")), scores)

        return scores


class FlaxSuppressTokensLogitsProcessor(FlaxLogitsProcessor):
    def __init__(self, suppress_tokens: list):
        self.suppress_tokens = list(suppress_tokens)

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        scores = scores.at[..., self.suppress_tokens].set(-float("inf"))

        return scores


class FlaxForceTokensLogitsProcessor(FlaxLogitsProcessor):
    def __init__(self, force_token_map):
        force_token_map = dict(force_token_map)
        # Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
        # index of the array corresponds to the index of the token to be forced, for XLA compatibility.
        # Indexes without forced tokens will have a negative value.
        force_token_array = jnp.ones((max(force_token_map.keys()) + 1), dtype=jnp.int32) * -1
        for index, token in force_token_map.items():
            if token is not None:
                force_token_array = force_token_array.at[index].set(token)
        self.force_token_array = jnp.int32(force_token_array)

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        def _force_token(generation_idx):
            batch_size = scores.shape[0]
            current_token = self.force_token_array[generation_idx]

            new_scores = jnp.ones_like(scores, dtype=scores.dtype) * -float("inf")
            updates = jnp.zeros((batch_size, 1), dtype=scores.dtype)
            new_scores = lax.dynamic_update_slice(new_scores, updates, (0, current_token))
            return new_scores

        scores = lax.cond(
            cur_len >= self.force_token_array.shape[0],
            # If the current length is geq than the length of force_token_array, the processor does nothing.
            lambda: scores,
            # Otherwise, it may force a certain token.
            lambda: lax.cond(
                self.force_token_array[cur_len] >= 0,
                # Only valid (positive) tokens are forced
                lambda: _force_token(cur_len),
                # Otherwise, the processor does nothing.
                lambda: scores,
            ),
        )
        return scores


class FlaxWhisperTimeStampLogitsProcessor(FlaxLogitsProcessor):
    def __init__(self, generate_config, model_config, decoder_input_length):
        self.eos_token_id = generate_config.eos_token_id
        self.no_timestamps_token_id = generate_config.no_timestamps_token_id
        self.timestamp_begin = generate_config.no_timestamps_token_id + 1

        self.begin_index = decoder_input_length + 1

        if generate_config.is_multilingual:
            # room for language token and task token
            self.begin_index += 2
        if hasattr(generate_config, "max_initial_timestamp_index"):
            self.max_initial_timestamp_index = generate_config.max_initial_timestamp_index
        else:
            self.max_initial_timestamp_index = model_config.vocab_size
        if self.max_initial_timestamp_index is None:
            self.max_initial_timestamp_index = model_config.vocab_size

    def __call__(self, input_ids, scores, cur_len):
        # suppress <|notimestamps|> which is handled by without_timestamps
        scores = scores.at[:, self.no_timestamps_token_id].set(-float("inf"))

        def handle_pairs(input_ids_k, scores_k):
            last_was_timestamp = jnp.where((cur_len - self.begin_index) >= 1, True, False)
            last_was_timestamp = jnp.where(
                input_ids_k[cur_len - 1] >= self.timestamp_begin,
                True and last_was_timestamp,
                False,
            )

            penultimate_was_timestamp = jnp.where((cur_len - self.begin_index) < 2, True, False)
            penultimate_was_timestamp = jnp.where(
                input_ids_k[cur_len - 2] >= self.timestamp_begin,
                True,
                penultimate_was_timestamp,
            )

            return jnp.where(
                last_was_timestamp,
                jnp.where(
                    penultimate_was_timestamp > 0,
                    scores_k.at[self.timestamp_begin :].set(-float("inf")),
                    scores_k.at[: self.eos_token_id].set(-float("inf")),
                ),
                scores_k,
            )

        scores = jax.vmap(handle_pairs)(input_ids, scores)

        apply_max_initial_timestamp = jnp.where(cur_len == self.begin_index, True, False)
        apply_max_initial_timestamp = jnp.where(
            self.max_initial_timestamp_index is not None,
            True and apply_max_initial_timestamp,
            False,
        )

        last_allowed = self.timestamp_begin + self.max_initial_timestamp_index

        scores = jnp.where(
            apply_max_initial_timestamp,
            scores.at[:, last_allowed + 1 :].set(-float("inf")),
            scores,
        )

        # if sum of probability over timestamps is above any other token, sample timestamp
        logprobs = jax.nn.log_softmax(scores, axis=-1)

        def handle_cumulative_probs(logprobs_k, scores_k):
            timestamp_logprob = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :], axis=-1)
            max_text_token_logprob = jnp.max(logprobs_k[: self.timestamp_begin])
            return jnp.where(
                timestamp_logprob > max_text_token_logprob,
                scores_k.at[: self.timestamp_begin].set(-float("inf")),
                scores_k,
            )

        scores = jax.vmap(handle_cumulative_probs)(logprobs, scores)

        return scores
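# Illustrative sketch (not part of the original module; kept as a comment so import behavior is
# unchanged): chaining two of the warpers above on random logits. Shapes and values are made up.
#
#     import jax.numpy as jnp
#
#     processors = FlaxLogitsProcessorList(
#         [FlaxTemperatureLogitsWarper(0.7), FlaxTopKLogitsWarper(top_k=5)]
#     )
#     input_ids = jnp.zeros((2, 4), dtype="i4")
#     scores = jnp.ones((2, 100))
#     warped = processors(input_ids, scores, cur_len=4)  # still shape (2, 100)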
| 363 |
import warnings
from contextlib import contextmanager

from ...processing_utils import ProcessorMixin


class Speech2TextProcessor(ProcessorMixin):
    """Wraps a Speech2Text feature extractor and tokenizer into a single processor."""

    feature_extractor_class = "Speech2TextFeatureExtractor"
    tokenizer_class = "Speech2TextTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
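# Illustrative sketch (not part of the original module; kept as a comment): the processor fans
# audio out to the feature extractor and text to the tokenizer. The checkpoint name and the
# dummy waveform are example values only.
#
#     import numpy as np
#     from transformers import Speech2TextProcessor
#
#     processor = Speech2TextProcessor.from_pretrained("facebook/s2t-small-librispeech-asr")
#     speech = np.zeros(16_000, dtype=np.float32)  # 1 second of silence at 16 kHz
#     inputs = processor(audio=speech, sampling_rate=16_000, text="hello world")
#     # inputs now holds the extracted features plus tokenized `labels`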
| 221 | 0 |
import string
def decrypt(message: str) -> None:
    """Brute-force a Caesar cipher by printing the candidate plaintext for every possible key."""
    for key in range(len(string.ascii_uppercase)):
        translated = ""
        for symbol in message:
            if symbol in string.ascii_uppercase:
                num = string.ascii_uppercase.find(symbol)
                num = num - key
                if num < 0:
                    num = num + len(string.ascii_uppercase)
                translated = translated + string.ascii_uppercase[num]
            else:
                translated = translated + symbol
        print(f"Decryption using Key #{key}: {translated}")


def main() -> None:
    message = input("Encrypted message: ")
    message = message.upper()
    decrypt(message)
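# Illustrative example (not part of the original script; kept as a comment): brute-forcing the
# ciphertext "KHOOR" prints all 26 candidates, including the key-3 decryption:
#
#     decrypt("KHOOR")
#     # ...
#     # Decryption using Key #3: HELLO
#     # ...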
if __name__ == "__main__":
import doctest
doctest.testmod()
main() | 206 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from .config import config_command_parser
from .config_args import default_config_file, load_config_from_file # noqa: F401
from .default import default_command_parser
from .update import update_command_parser
def get_config_parser(subparsers=None):
    parent_parser = argparse.ArgumentParser(add_help=False, allow_abbrev=False)
    # The main config parser
    config_parser = config_command_parser(subparsers)
    # The subparser to add commands to
    subcommands = config_parser.add_subparsers(title="subcommands", dest="subcommand")

    # Then add other parsers with the parent parser
    default_command_parser(subcommands, parents=[parent_parser])
    update_command_parser(subcommands, parents=[parent_parser])

    return config_parser


def main():
    config_parser = get_config_parser()
    args = config_parser.parse_args()

    if not hasattr(args, "func"):
        config_parser.print_help()
        exit(1)

    # Run
    args.func(args)
if __name__ == "__main__":
main() | 206 | 1 |
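# Illustrative usage (not part of the original module; kept as comments so import behavior is
# unchanged). This parser backs the `accelerate config` command group, e.g.:
#
#     accelerate config            # interactive questionnaire, writes the default config file
#     accelerate config default    # write a default config without prompting
#     accelerate config update     # migrate an older config file in place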
"""simple docstring"""
import argparse
import os
import re

import torch
from flax.traverse_util import flatten_dict
from t5x import checkpoints

from transformers import (
    AutoTokenizer,
    Pix2StructConfig,
    Pix2StructForConditionalGeneration,
    Pix2StructImageProcessor,
    Pix2StructProcessor,
    Pix2StructTextConfig,
    Pix2StructVisionConfig,
)


def get_flax_param(t5x_checkpoint_path):
    flax_params = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    flax_params = flatten_dict(flax_params)
    return flax_params


def rename_and_convert_flax_params(flax_dict):
    converted_dict = {}

    CONVERSION_MAPPING = {
'token_embedder': 'embeddings',
'encoder_norm': 'layernorm',
'kernel': 'weight',
'.out': '.output',
'scale': 'weight',
'embedders_0.pos_embedding': 'row_embedder.weight',
'embedders_1.pos_embedding': 'column_embedder.weight',
}
    DECODER_CONVERSION_MAPPING = {
'query': 'attention.query',
'key': 'attention.key',
'value': 'attention.value',
'output.dense': 'output',
'encoder_decoder_attention.o': 'encoder_decoder_attention.attention.o',
'pre_self_attention_layer_norm': 'self_attention.layer_norm',
'pre_cross_attention_layer_norm': 'encoder_decoder_attention.layer_norm',
'mlp.': 'mlp.DenseReluDense.',
'pre_mlp_layer_norm': 'mlp.layer_norm',
'self_attention.o': 'self_attention.attention.o',
'decoder.embeddings.embedding': 'decoder.embed_tokens.weight',
'decoder.relpos_bias.rel_embedding': 'decoder.layer.0.self_attention.attention.relative_attention_bias.weight',
'decoder.decoder_norm.weight': 'decoder.final_layer_norm.weight',
'decoder.logits_dense.weight': 'decoder.lm_head.weight',
}
    for key in flax_dict.keys():
        if "target" in key:
            # remove the first prefix from the key
            new_key = ".".join(key[1:])

            # rename the key
            for old, new in CONVERSION_MAPPING.items():
                new_key = new_key.replace(old, new)

            if "decoder" in new_key:
                for old, new in DECODER_CONVERSION_MAPPING.items():
                    new_key = new_key.replace(old, new)

            if "layers" in new_key and "decoder" not in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r"layers_(\d+)", r"layer.\1", new_key)
                new_key = new_key.replace("encoder", "encoder.encoder")
            elif "layers" in new_key and "decoder" in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r"layers_(\d+)", r"layer.\1", new_key)

            converted_dict[new_key] = flax_dict[key]

    converted_torch_dict = {}
    # convert converted_dict into torch format
    for key in converted_dict.keys():
        if ("embed_tokens" not in key) and ("embedder" not in key):
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key].T)
        else:
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key])

    return converted_torch_dict


def convert_pix2struct_original_pytorch_checkpoint_to_hf(
    t5x_checkpoint_path, pytorch_dump_folder_path, use_large=False, is_vqa=False
):
    flax_params = get_flax_param(t5x_checkpoint_path)

    if not use_large:
        encoder_config = Pix2StructVisionConfig()
        decoder_config = Pix2StructTextConfig()
    else:
        encoder_config = Pix2StructVisionConfig(
            hidden_size=1536, d_ff=3968, num_attention_heads=24, num_hidden_layers=18
        )
        decoder_config = Pix2StructTextConfig(hidden_size=1536, d_ff=3968, num_heads=24, num_layers=18)
    config = Pix2StructConfig(
        vision_config=encoder_config.to_dict(), text_config=decoder_config.to_dict(), is_vqa=is_vqa
    )

    model = Pix2StructForConditionalGeneration(config)

    torch_params = rename_and_convert_flax_params(flax_params)
    model.load_state_dict(torch_params)

    tok = AutoTokenizer.from_pretrained("ybelkada/test-pix2struct-tokenizer")
    image_processor = Pix2StructImageProcessor()
    processor = Pix2StructProcessor(image_processor=image_processor, tokenizer=tok)

    if use_large:
        # attribute names below are reconstructed; the obfuscated source only preserved the values
        processor.image_processor.max_patches = 4096
        processor.image_processor.is_vqa = True

    # mkdir if needed
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)

    model.save_pretrained(pytorch_dump_folder_path)
    processor.save_pretrained(pytorch_dump_folder_path)

    print("Model saved in {}".format(pytorch_dump_folder_path))


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--t5x_checkpoint_path", default=None, type=str, help="Path to the original T5x checkpoint.")
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--use_large", action="store_true", help="Use large model.")
    parser.add_argument("--is_vqa", action="store_true", help="Use the VQA variant.")
    args = parser.parse_args()

    convert_pix2struct_original_pytorch_checkpoint_to_hf(
        args.t5x_checkpoint_path, args.pytorch_dump_folder_path, args.use_large
    )
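# Illustrative invocation (not part of the original script; the file name and checkpoint path
# are placeholders):
#
#     python convert_pix2struct_checkpoint.py \
#         --t5x_checkpoint_path /path/to/t5x/checkpoint \
#         --pytorch_dump_folder_path ./pix2struct-converted \
#         --use_large
#
# The dumped folder can then be reloaded with
# `Pix2StructForConditionalGeneration.from_pretrained("./pix2struct-converted")`.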
| 355 |
import datasets
_CITATION = '\\n@InProceedings{conneau2018xnli,\n  author = "Conneau, Alexis\n  and Rinott, Ruty\n  and Lample, Guillaume\n  and Williams, Adina\n  and Bowman, Samuel R.\n  and Schwenk, Holger\n  and Stoyanov, Veselin",\n  title = "XNLI: Evaluating Cross-lingual Sentence Representations",\n  booktitle = "Proceedings of the 2018 Conference on Empirical Methods\n  in Natural Language Processing",\n  year = "2018",\n  publisher = "Association for Computational Linguistics",\n  location = "Brussels, Belgium",\n}\n'
_DESCRIPTION = '\\nXNLI is a subset of a few thousand examples from MNLI which has been translated\ninto 14 different languages (some low-ish resource). As with MNLI, the goal is\nto predict textual entailment (does sentence A imply/contradict/neither sentence\nB) and is a classification task (given two sentences, predict one of three\nlabels).\n'
_KWARGS_DESCRIPTION = '\nComputes XNLI score which is just simple accuracy.\nArgs:\n    predictions: Predicted labels.\n    references: Ground truth labels.\nReturns:\n    \'accuracy\': accuracy\nExamples:\n\n    >>> predictions = [0, 1]\n    >>> references = [0, 1]\n    >>> xnli_metric = datasets.load_metric("xnli")\n    >>> results = xnli_metric.compute(predictions=predictions, references=references)\n    >>> print(results)\n    {\'accuracy\': 1.0}\n'


def simple_accuracy(preds, labels):
    return (preds == labels).mean()


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Xnli(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("int64" if self.config_name != "sts-b" else "float32"),
                    "references": datasets.Value("int64" if self.config_name != "sts-b" else "float32"),
                }
            ),
            codebase_urls=[],
            reference_urls=[],
            format="numpy",
        )

    def _compute(self, predictions, references):
        return {"accuracy": simple_accuracy(predictions, references)}
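# Illustrative check (not part of the original module; kept as a comment): the metric reduces
# to plain accuracy, so these agree.
#
#     import numpy as np
#
#     preds, refs = np.array([0, 1, 2, 1]), np.array([0, 1, 1, 1])
#     assert simple_accuracy(preds, refs) == 0.75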
| 194 | 0 |
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, Transformer2DModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DiTPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DiTPipeline
    params = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "latents",
        "num_images_per_prompt",
        "callback",
        "callback_steps",
    }
    batch_params = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
    test_cpu_offload = False  # flag name reconstructed; the obfuscated source only preserved the value
    def get_dummy_components(self):
        torch.manual_seed(0)
        transformer = Transformer2DModel(
            sample_size=16,
            num_layers=2,
            patch_size=4,
            attention_head_dim=8,
            num_attention_heads=2,
            in_channels=4,
            out_channels=8,
            attention_bias=True,
            activation_fn="gelu-approximate",
            num_embeds_ada_norm=1000,
            norm_type="ada_norm_zero",
            norm_elementwise_affine=False,
        )
        vae = AutoencoderKL()
        scheduler = DDIMScheduler()
        components = {"transformer": transformer.eval(), "vae": vae.eval(), "scheduler": scheduler}
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "class_labels": [1],
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_inference(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 16, 16, 3))
        expected_slice = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457])
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(relax_max_difference=True, expected_max_diff=1e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)
@require_torch_gpu
@slow
class UpperCamelCase__ ( unittest.TestCase):
def lowercase_ ( self :str ) -> Any:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase_ ( self :str ) -> Tuple:
'''simple docstring'''
__A = torch.manual_seed(0 )
__A = DiTPipeline.from_pretrained('facebook/DiT-XL-2-256' )
pipe.to('cuda' )
__A = ['vase', 'umbrella', 'white shark', 'white wolf']
__A = pipe.get_label_ids(_A )
__A = pipe(_A , generator=_A , num_inference_steps=40 , output_type='np' ).images
for word, image in zip(_A , _A ):
__A = load_numpy(
F'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy' )
assert np.abs((expected_image - image).max() ) < 1E-2
def lowercase_ ( self :int ) -> Union[str, Any]:
'''simple docstring'''
__A = DiTPipeline.from_pretrained('facebook/DiT-XL-2-512' )
__A = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.to('cuda' )
__A = ['vase', 'umbrella']
__A = pipe.get_label_ids(_A )
__A = torch.manual_seed(0 )
__A = pipe(_A , generator=_A , num_inference_steps=25 , output_type='np' ).images
for word, image in zip(_A , _A ):
__A = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
F'/dit/{word}_512.npy' )
assert np.abs((expected_image - image).max() ) < 1E-1
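# Illustrative sketch (not part of the original test file; kept as a comment) of the pipeline
# exercised above, using the same public facebook/DiT-XL-2-256 weights.
#
#     import torch
#     from diffusers import DiTPipeline
#
#     pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256", torch_dtype=torch.float16)
#     pipe.to("cuda")
#     class_ids = pipe.get_label_ids(["white shark"])
#     image = pipe(class_labels=class_ids, num_inference_steps=25).images[0]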
| 161 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImgaImgPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE , unittest.TestCase):
UpperCAmelCase__ : str = KandinskyImgaImgPipeline
UpperCAmelCase__ : Optional[int] = ['prompt', 'image_embeds', 'negative_image_embeds', 'image']
UpperCAmelCase__ : Union[str, Any] = [
'prompt',
'negative_prompt',
'image_embeds',
'negative_image_embeds',
'image',
]
UpperCAmelCase__ : Union[str, Any] = [
'generator',
'height',
'width',
'strength',
'guidance_scale',
'negative_prompt',
'num_inference_steps',
'return_dict',
'guidance_scale',
'num_images_per_prompt',
'output_type',
'return_dict',
]
UpperCAmelCase__ : Any = False
@property
def lowercase_ ( self :Tuple ) -> Any:
'''simple docstring'''
return 32
@property
def lowercase_ ( self :Optional[int] ) -> str:
'''simple docstring'''
return 32
@property
def lowercase_ ( self :Optional[Any] ) -> str:
'''simple docstring'''
return self.time_input_dim
@property
def lowercase_ ( self :Optional[Any] ) -> Any:
'''simple docstring'''
return self.time_input_dim * 4
@property
def lowercase_ ( self :Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
return 100
@property
def lowercase_ ( self :Tuple ) -> Tuple:
'''simple docstring'''
__A = XLMRobertaTokenizerFast.from_pretrained('YiYiXu/tiny-random-mclip-base' )
return tokenizer
@property
def lowercase_ ( self :Union[str, Any] ) -> List[str]:
'''simple docstring'''
torch.manual_seed(0 )
__A = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1_005 , )
__A = MultilingualCLIP(_A )
__A = text_encoder.eval()
return text_encoder
@property
def lowercase_ ( self :Optional[int] ) -> Tuple:
'''simple docstring'''
torch.manual_seed(0 )
__A = {
'in_channels': 4,
# Out channels is double in channels because predicts mean and variance
'out_channels': 8,
'addition_embed_type': 'text_image',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'text_image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
__A = UNetaDConditionModel(**_A )
return model
@property
def lowercase_ ( self :Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def lowercase_ ( self :Optional[int] ) -> Any:
'''simple docstring'''
torch.manual_seed(0 )
__A = VQModel(**self.dummy_movq_kwargs )
return model
def lowercase_ ( self :List[str] ) -> str:
'''simple docstring'''
__A = self.dummy_text_encoder
__A = self.dummy_tokenizer
__A = self.dummy_unet
__A = self.dummy_movq
__A = {
'num_train_timesteps': 1_000,
'beta_schedule': 'linear',
'beta_start': 0.00_085,
'beta_end': 0.012,
'clip_sample': False,
'set_alpha_to_one': False,
'steps_offset': 0,
'prediction_type': 'epsilon',
'thresholding': False,
}
__A = DDIMScheduler(**_A )
__A = {
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
def lowercase_ ( self :Dict , _A :Union[str, Any] , _A :Optional[int]=0 ) -> str:
'''simple docstring'''
__A = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(_A ) ).to(_A )
__A = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(_A )
# create init_image
__A = floats_tensor((1, 3, 64, 64) , rng=random.Random(_A ) ).to(_A )
__A = image.cpu().permute(0 , 2 , 3 , 1 )[0]
__A = Image.fromarray(np.uinta(_A ) ).convert('RGB' ).resize((256, 256) )
if str(_A ).startswith('mps' ):
__A = torch.manual_seed(_A )
else:
__A = torch.Generator(device=_A ).manual_seed(_A )
__A = {
'prompt': 'horse',
'image': init_image,
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'generator': generator,
'height': 64,
'width': 64,
'num_inference_steps': 10,
'guidance_scale': 7.0,
'strength': 0.2,
'output_type': 'np',
}
return inputs
def lowercase_ ( self :Optional[Any] ) -> Optional[int]:
'''simple docstring'''
__A = 'cpu'
__A = self.get_dummy_components()
__A = self.pipeline_class(**_A )
__A = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
__A = pipe(**self.get_dummy_inputs(_A ) )
__A = output.images
__A = pipe(
**self.get_dummy_inputs(_A ) , return_dict=_A , )[0]
__A = image[0, -3:, -3:, -1]
__A = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__A = np.array(
[0.61_474_943, 0.6_073_539, 0.43_308_544, 0.5_928_269, 0.47_493_595, 0.46_755_973, 0.4_613_838, 0.45_368_797, 0.50_119_233] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), F' expected_slice {expected_slice}, but got {image_slice.flatten()}'
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), F' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
@slow
@require_torch_gpu
class UpperCamelCase__ ( unittest.TestCase):
def lowercase_ ( self :Union[str, Any] ) -> Dict:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase_ ( self :Dict ) -> Optional[int]:
'''simple docstring'''
__A = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinsky/kandinsky_img2img_frog.npy' )
__A = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' )
__A = 'A red cartoon frog, 4k'
__A = KandinskyPriorPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-1-prior' , torch_dtype=torch.floataa )
pipe_prior.to(_A )
__A = KandinskyImgaImgPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-1' , torch_dtype=torch.floataa )
__A = pipeline.to(_A )
pipeline.set_progress_bar_config(disable=_A )
__A = torch.Generator(device='cpu' ).manual_seed(0 )
__A , __A = pipe_prior(
_A , generator=_A , num_inference_steps=5 , negative_prompt='' , ).to_tuple()
__A = pipeline(
_A , image=_A , image_embeds=_A , negative_image_embeds=_A , generator=_A , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type='np' , )
__A = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(_A , _A )
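# Illustrative sketch (not part of the original test file; kept as a comment): the two-stage
# flow the slow test above exercises — the prior pipeline produces image embeddings that the
# img2img pipeline consumes. `init_image` is any PIL image; checkpoint names are the public
# kandinsky-community weights used above.
#
#     from diffusers import KandinskyImg2ImgPipeline, KandinskyPriorPipeline
#
#     prior = KandinskyPriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-1-prior")
#     pipe = KandinskyImg2ImgPipeline.from_pretrained("kandinsky-community/kandinsky-2-1")
#     image_embeds, negative_embeds = prior("A red cartoon frog, 4k").to_tuple()
#     frog = pipe(
#         "A red cartoon frog, 4k",
#         image=init_image,
#         image_embeds=image_embeds,
#         negative_image_embeds=negative_embeds,
#         strength=0.3,
#     ).images[0]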
| 161 | 1 |
import time
import warnings
from abc import ABC
from copy import deepcopy
from typing import Optional
import torch
from ..utils import add_start_docstrings, logging
logger = logging.get_logger(__name__)

STOPPING_CRITERIA_INPUTS_DOCSTRING = R'''
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):
Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax
or scores for each vocabulary token after SoftMax.
kwargs (`Dict[str, Any]`, *optional*):
Additional stopping criteria specific kwargs.
Return:
`bool`. `False` indicates we should continue, `True` indicates we should stop.
'''
class StoppingCriteria(ABC):
    """Abstract base class for all stopping criteria that can be applied during generation."""

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        raise NotImplementedError("StoppingCriteria needs to be subclassed")


class MaxLengthCriteria(StoppingCriteria):
    def __init__(self, max_length: int, max_position_embeddings: Optional[int] = None):
        self.max_length = max_length
        self.max_position_embeddings = max_position_embeddings

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        cur_len = input_ids.shape[-1]
        is_done = cur_len >= self.max_length
        if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings:
            logger.warning_once(
                "This is a friendly reminder - the current text generation call will exceed the model's predefined "
                f"maximum length ({self.max_position_embeddings}). Depending on the model, you may observe "
                "exceptions, performance degradation, or nothing at all."
            )
        return is_done


class MaxNewTokensCriteria(StoppingCriteria):
    def __init__(self, start_length: int, max_new_tokens: int):
        warnings.warn(
            "The class `MaxNewTokensCriteria` is deprecated. "
            f"Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` "
            "with `max_length = start_length + max_new_tokens` instead.",
            FutureWarning,
        )
        self.start_length = start_length
        self.max_new_tokens = max_new_tokens
        self.max_length = start_length + max_new_tokens

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return input_ids.shape[-1] >= self.max_length


class MaxTimeCriteria(StoppingCriteria):
    def __init__(self, max_time: float, initial_timestamp: Optional[float] = None):
        self.max_time = max_time
        self.initial_timestamp = time.time() if initial_timestamp is None else initial_timestamp

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return time.time() - self.initial_timestamp > self.max_time


class StoppingCriteriaList(list):
    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return any(criteria(input_ids, scores) for criteria in self)

    @property
    def max_length(self) -> Optional[int]:
        for stopping_criterium in self:
            if isinstance(stopping_criterium, MaxLengthCriteria):
                return stopping_criterium.max_length
            elif isinstance(stopping_criterium, MaxNewTokensCriteria):
                return stopping_criterium.max_length
        return None


def validate_stopping_criteria(stopping_criteria: StoppingCriteriaList, max_length: int) -> StoppingCriteriaList:
    stopping_max_length = stopping_criteria.max_length
    new_stopping_criteria = deepcopy(stopping_criteria)
    if stopping_max_length is not None and stopping_max_length != max_length:
        warnings.warn("You set different `max_length` for stopping criteria and `max_length` parameter", UserWarning)
    elif stopping_max_length is None:
        new_stopping_criteria.append(MaxLengthCriteria(max_length=max_length))
    return new_stopping_criteria
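# Illustrative sketch (not part of the original module; kept as a comment): combining a length
# budget with a wall-clock budget — generation stops as soon as either criterion fires.
#
#     criteria = StoppingCriteriaList(
#         [MaxLengthCriteria(max_length=64), MaxTimeCriteria(max_time=5.0)]
#     )
#     # inside a generation loop:
#     #     if criteria(input_ids, scores):
#     #         break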
| 156 |
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer
def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    extra_kw = {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(" ") else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line],
        max_length=max_length,
        padding="max_length" if pad_to_max_length else None,
        truncation=True,
        return_tensors=return_tensors,
        add_special_tokens=True,
        **extra_kw,
    )


def trim_batch(input_ids, pad_token_id, attention_mask=None):
    """Remove columns that are populated exclusively by pad_token_id."""
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class A_ ( lowerCAmelCase_ ):
def __init__( self : Union[str, Any] , snake_case_ : int , snake_case_ : str , snake_case_ : Optional[Any] , snake_case_ : List[Any] , snake_case_ : int="train" , snake_case_ : Tuple=None , snake_case_ : str=None , snake_case_ : Optional[Any]=None , snake_case_ : Any="" , ):
super().__init__()
_UpperCAmelCase = Path(snake_case_ ).joinpath(type_path + ".source" )
_UpperCAmelCase = Path(snake_case_ ).joinpath(type_path + ".target" )
_UpperCAmelCase = self.get_char_lens(self.src_file )
_UpperCAmelCase = max_source_length
_UpperCAmelCase = max_target_length
assert min(self.src_lens ) > 0, f'found empty line in {self.src_file}'
_UpperCAmelCase = tokenizer
_UpperCAmelCase = prefix
if n_obs is not None:
_UpperCAmelCase = self.src_lens[:n_obs]
_UpperCAmelCase = src_lang
_UpperCAmelCase = tgt_lang
def __len__( self : List[Any] ):
return len(self.src_lens )
def __getitem__( self : Optional[Any] , snake_case_ : List[Any] ):
_UpperCAmelCase = index + 1 # linecache starts at 1
_UpperCAmelCase = self.prefix + linecache.getline(str(self.src_file ) , snake_case_ ).rstrip("\n" )
_UpperCAmelCase = linecache.getline(str(self.tgt_file ) , snake_case_ ).rstrip("\n" )
assert source_line, f'empty source line for index {index}'
assert tgt_line, f'empty tgt line for index {index}'
# Need to add eos token manually for T5
if isinstance(self.tokenizer , snake_case_ ):
source_line += self.tokenizer.eos_token
tgt_line += self.tokenizer.eos_token
# Pad source and target to the right
_UpperCAmelCase = (
self.tokenizer.question_encoder if isinstance(self.tokenizer , snake_case_ ) else self.tokenizer
)
_UpperCAmelCase = self.tokenizer.generator if isinstance(self.tokenizer , snake_case_ ) else self.tokenizer
_UpperCAmelCase = encode_line(snake_case_ , snake_case_ , self.max_source_length , "right" )
_UpperCAmelCase = encode_line(snake_case_ , snake_case_ , self.max_target_length , "right" )
_UpperCAmelCase = source_inputs["input_ids"].squeeze()
_UpperCAmelCase = target_inputs["input_ids"].squeeze()
_UpperCAmelCase = source_inputs["attention_mask"].squeeze()
return {
"input_ids": source_ids,
"attention_mask": src_mask,
"decoder_input_ids": target_ids,
}
@staticmethod
def lowercase ( snake_case_ : Optional[Any] ):
return [len(snake_case_ ) for x in Path(snake_case_ ).open().readlines()]
def lowercase ( self : List[str] , snake_case_ : Optional[int] ):
_UpperCAmelCase = torch.stack([x["input_ids"] for x in batch] )
_UpperCAmelCase = torch.stack([x["attention_mask"] for x in batch] )
_UpperCAmelCase = torch.stack([x["decoder_input_ids"] for x in batch] )
_UpperCAmelCase = (
self.tokenizer.generator.pad_token_id
if isinstance(self.tokenizer , snake_case_ )
else self.tokenizer.pad_token_id
)
_UpperCAmelCase = (
self.tokenizer.question_encoder.pad_token_id
if isinstance(self.tokenizer , snake_case_ )
else self.tokenizer.pad_token_id
)
_UpperCAmelCase = trim_batch(snake_case_ , snake_case_ )
_UpperCAmelCase , _UpperCAmelCase = trim_batch(snake_case_ , snake_case_ , attention_mask=snake_case_ )
_UpperCAmelCase = {
"input_ids": source_ids,
"attention_mask": source_mask,
"decoder_input_ids": y,
}
return batch
logger = getLogger(__name__)


def flatten_list(summary_ids: List[List]):
    return list(itertools.chain.from_iterable(summary_ids))


def save_git_info(folder_path: str) -> None:
    """Save git information to folder_path/git_log.json."""
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, "git_log.json"))


def save_json(content, path, indent=4, **json_dump_kwargs):
    with open(path, "w") as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)


def load_json(path):
    with open(path) as f:
        return json.load(f)


def get_git_info():
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
        "hostname": str(socket.gethostname()),
    }
    return repo_infos


def lmap(f: Callable, x: Iterable) -> List:
    """list(map(f, x))"""
    return list(map(f, x))


def pickle_save(obj, path):
    """Pickle obj to the given path."""
    with open(path, "wb") as f:
        return pickle.dump(obj, f)
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def f1_score(prediction, ground_truth):
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1


def exact_match_score(prediction, ground_truth):
    return normalize_answer(prediction) == normalize_answer(ground_truth)


def calculate_exact_match(output_lns, reference_lns):
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}
def is_rag_model(model_prefix):
    return model_prefix.startswith("rag")


def set_extra_model_params(extra_params, hparams, config):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info("config doesn't have a `{}` attribute".format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
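# Illustrative checks (not part of the original module; kept as comments): the normalization in
# the EM/F1 helpers above strips case, punctuation and articles.
#
#     assert exact_match_score("The Answer!", "answer")
#     assert calculate_exact_match(["the answer"], ["answer"]) == {"em": 1.0}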
| 156 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    "configuration_vision_encoder_decoder": ["VisionEncoderDecoderConfig", "VisionEncoderDecoderOnnxConfig"]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vision_encoder_decoder"] = ["VisionEncoderDecoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vision_encoder_decoder"] = ["TFVisionEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_vision_encoder_decoder"] = ["FlaxVisionEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
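# Illustrative sketch (not part of the original module; kept as a comment): composing an image
# encoder with a text decoder through the class exported above. The checkpoint names are public
# example weights, not anything this file prescribes.
#
#     from transformers import VisionEncoderDecoderModel
#
#     model = VisionEncoderDecoderModel.from_encoder_decoder_pretrained(
#         "google/vit-base-patch16-224-in21k", "gpt2"
#     )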
| 147 |
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize("repo_id", ["canonical_dataset_name", "org-name/dataset-name"])
@pytest.mark.parametrize("path", ["filename.csv", "filename with blanks.csv"])
@pytest.mark.parametrize("revision", [None, "v2"])
def test_hf_hub_url(repo_id, path, revision):
    url = hf_hub_url(repo_id=repo_id, path=path, revision=revision)
    assert url == f"https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(path)}"
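# Illustrative example (not part of the original test; kept as a comment): what the helper
# returns for a concrete input.
#
#     hf_hub_url(repo_id="squad", path="plain_text/train.json", revision=None)
#     # -> 'https://huggingface.co/datasets/squad/resolve/main/plain_text/train.json'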
| 147 | 1 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class OpenAIGPTModelTester:
    '''simple docstring'''
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_12 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1
    def prepare_config_and_inputs( self ):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = OpenAIGPTConfig(
            vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
    def create_and_check_openai_gpt_model( self , config , input_ids , head_mask , token_type_ids , *args ):
        '''simple docstring'''
        model = OpenAIGPTModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , token_type_ids=token_type_ids , head_mask=head_mask )
        result = model(input_ids , token_type_ids=token_type_ids )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_lm_head_model( self , config , input_ids , head_mask , token_type_ids , *args ):
        '''simple docstring'''
        model = OpenAIGPTLMHeadModel(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , token_type_ids=token_type_ids , labels=input_ids )
        self.parent.assertEqual(result.loss.shape , () )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_double_lm_head_model( self , config , input_ids , head_mask , token_type_ids , *args ):
        '''simple docstring'''
        model = OpenAIGPTDoubleHeadsModel(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , token_type_ids=token_type_ids , labels=input_ids )
        self.parent.assertEqual(result.loss.shape , () )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_openai_gpt_for_sequence_classification( self , config , input_ids , head_mask , token_type_ids , *args ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = OpenAIGPTForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        result = model(input_ids , token_type_ids=token_type_ids , labels=sequence_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def prepare_config_and_inputs_for_common( self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
"""input_ids""": input_ids,
"""token_type_ids""": token_type_ids,
"""head_mask""": head_mask,
}
return config, inputs_dict
@require_torch
class OpenAIGPTModelTest( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
if is_torch_available()
else ()
)
    all_generative_model_classes = (
(OpenAIGPTLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
    pipeline_model_mapping = (
{
'feature-extraction': OpenAIGPTModel,
'text-classification': OpenAIGPTForSequenceClassification,
'text-generation': OpenAIGPTLMHeadModel,
'zero-shot': OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
    def is_pipeline_test_to_skip( self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ):
'''simple docstring'''
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
    def _prepare_for_class( self , inputs_dict , model_class , return_labels=False ):
        '''simple docstring'''
        inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
        if return_labels:
            if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
                inputs_dict["""labels"""] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=torch_device , )
                inputs_dict["""input_ids"""] = inputs_dict["""labels"""]
                inputs_dict["""token_type_ids"""] = inputs_dict["""labels"""]
                inputs_dict["""mc_token_ids"""] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=torch_device , )
                inputs_dict["""mc_labels"""] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
        return inputs_dict
    def setUp( self ):
        '''simple docstring'''
        self.model_tester = OpenAIGPTModelTester(self )
        self.config_tester = ConfigTester(self , config_class=OpenAIGPTConfig , n_embd=37 )
    def test_config( self ):
        '''simple docstring'''
        self.config_tester.run_common_tests()
    def test_openai_gpt_model( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_model(*config_and_inputs )
    def test_openai_gpt_lm_head_model( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs )
    def test_openai_gpt_double_lm_head_model( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_double_lm_head_model(*config_and_inputs )
    def test_openai_gpt_classification_model( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*config_and_inputs )
    @slow
    def test_model_from_pretrained( self ):
        '''simple docstring'''
        for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = OpenAIGPTModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_torch
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
    @slow
    def test_lm_generate_openai_gpt( self ):
        '''simple docstring'''
        model = OpenAIGPTLMHeadModel.from_pretrained("""openai-gpt""" )
        model.to(torch_device )
        input_ids = torch.tensor([[4_81, 47_35, 5_44]] , dtype=torch.long , device=torch_device ) # the president is
        expected_output_ids = [
4_81,
47_35,
5_44,
2_46,
9_63,
8_70,
7_62,
2_39,
2_44,
4_04_77,
2_44,
2_49,
7_19,
8_81,
4_87,
5_44,
2_40,
2_44,
6_03,
4_81,
] # the president is a very good man. " \n " i\'m sure he is, " said the
        output_ids = model.generate(input_ids , do_sample=False )
        self.assertListEqual(output_ids[0].tolist() , expected_output_ids )
| 136 |
'''simple docstring'''
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def main() -> None:
    """simple docstring"""
    parser = ArgumentParser("""Transformers CLI tool""", usage="""transformers-cli <command> [<args>]""" )
    commands_parser = parser.add_subparsers(help="""transformers-cli command helpers""" )
    # Register commands
    ConvertCommand.register_subcommand(commands_parser )
    DownloadCommand.register_subcommand(commands_parser )
    EnvironmentCommand.register_subcommand(commands_parser )
    RunCommand.register_subcommand(commands_parser )
    ServeCommand.register_subcommand(commands_parser )
    UserCommands.register_subcommand(commands_parser )
    AddNewModelCommand.register_subcommand(commands_parser )
    AddNewModelLikeCommand.register_subcommand(commands_parser )
    LfsCommands.register_subcommand(commands_parser )
    PTtoTFCommand.register_subcommand(commands_parser )
    # Let's go
    args = parser.parse_args()
    if not hasattr(args, """func""" ):
        parser.print_help()
        exit(1 )
    # Run
    service = args.func(args )
    service.run()
if __name__ == "__main__":
main()
| 136 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
"uw-madison/mra-base-512-4": "https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json",
}
class MraConfig( PretrainedConfig ):
    """simple docstring"""
    model_type = 'mra'
    def __init__( self , vocab_size=5_0_2_6_5 , hidden_size=7_6_8 , num_hidden_layers=1_2 , num_attention_heads=1_2 , intermediate_size=3_0_7_2 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_1_2 , type_vocab_size=1 , initializer_range=0.02 , layer_norm_eps=1E-5 , position_embedding_type="absolute" , block_per_row=4 , approx_mode="full" , initial_prior_first_n_blocks=0 , initial_prior_diagonal_n_blocks=0 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.block_per_row = block_per_row
        self.approx_mode = approx_mode
        self.initial_prior_first_n_blocks = initial_prior_first_n_blocks
        self.initial_prior_diagonal_n_blocks = initial_prior_diagonal_n_blocks
| 100 |
"""simple docstring"""
import argparse
import torch
from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch( tf_checkpoint_path , albert_config_file , pytorch_dump_path ):
    """simple docstring"""
    config = AlbertConfig.from_json_file(albert_config_file )
    print(F'''Building PyTorch model from configuration: {config}''' )
    model = AlbertForPreTraining(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_albert(model , config , tf_checkpoint_path )
    # Save pytorch-model
    print(F'''Save PyTorch model to {pytorch_dump_path}''' )
    torch.save(model.state_dict() , pytorch_dump_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--albert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained ALBERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
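    # Hypothetical invocation (paths are illustrative, not from this repo):
    #   python convert_albert_original_tf_checkpoint_to_pytorch.py \
    #       --tf_checkpoint_path ./albert/model.ckpt-best \
    #       --albert_config_file ./albert/albert_config.json \
    #       --pytorch_dump_path ./albert_pytorch.bin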
| 221 | 0 |
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
from transformers.models.ta.modeling_flax_ta import shift_tokens_right
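# The integration test below teacher-forces the decoder (labels shifted right) and compares
# the optax cross-entropy score against a fixed reference value.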
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
@slow
    def test_small_integration_test( self ):
        model = FlaxMTaForConditionalGeneration.from_pretrained("""google/mt5-small""" )
        tokenizer = AutoTokenizer.from_pretrained("""google/mt5-small""" )
        input_ids = tokenizer("""Hello there""", return_tensors="""np""" ).input_ids
        labels = tokenizer("""Hi I am""", return_tensors="""np""" ).input_ids
        decoder_input_ids = shift_tokens_right(labels, model.config.pad_token_id, model.config.decoder_start_token_id )
        logits = model(input_ids, decoder_input_ids=decoder_input_ids ).logits
        loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1] ) ).mean()
        mtf_score = -(labels.shape[-1] * loss.item())
        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1e-4 )
| 115 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
SCREAMING_SNAKE_CASE_:int = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_:Any = {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/config.json""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/config.json""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/config.json""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/config.json""",
"""roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json""",
"""roberta-large-openai-detector""": """https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json""",
}
class RobertaConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = "roberta"
    def __init__( self, vocab_size=5_0265, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs, ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class RobertaOnnxConfig( OnnxConfig ):
    '''simple docstring'''
@property
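    # Axes 0/1 (and 2 for multiple-choice) are exported as symbolic ONNX dimensions;
    # everything else is fixed at export time.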
    def inputs( self ):
        if self.task == "multiple-choice":
            dynamic_axis = {0: """batch""", 1: """choice""", 2: """sequence"""}
        else:
            dynamic_axis = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
| 115 | 1 |
'''simple docstring'''
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
    import sqlite3
import sqlalchemy
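# These imports exist only for the type annotations below; neither package becomes a hard runtime dependency.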
class SqlDatasetReader( AbstractDatasetInputStream ):
    def __init__( self ,sql: Union[str, "sqlalchemy.sql.Selectable"] ,con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"] ,features: Optional[Features] = None ,cache_dir: str = None ,keep_in_memory: bool = False ,**kwargs ,):
        super().__init__(features=features ,cache_dir=cache_dir ,keep_in_memory=keep_in_memory ,**kwargs )
        self.builder = Sql(
            cache_dir=cache_dir ,features=features ,sql=sql ,con=con ,**kwargs ,)
    def read( self ):
        download_config = None
        download_mode = None
        verification_mode = None
        base_path = None
        self.builder.download_and_prepare(
            download_config=download_config ,download_mode=download_mode ,verification_mode=verification_mode ,base_path=base_path ,)
        # Build dataset for splits
        dataset = self.builder.as_dataset(
            split='train' ,verification_mode=verification_mode ,in_memory=self.keep_in_memory )
        return dataset
class SqlDatasetWriter:
    def __init__( self ,dataset: Dataset ,name: str ,con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"] ,batch_size: Optional[int] = None ,num_proc: Optional[int] = None ,**to_sql_kwargs ,):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(F"""num_proc {num_proc} must be an integer > 0.""" )
        self.dataset = dataset
        self.name = name
        self.con = con
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.to_sql_kwargs = to_sql_kwargs
    def write( self ):
        self.to_sql_kwargs.pop('sql' ,None )
        self.to_sql_kwargs.pop('con' ,None )
        index = self.to_sql_kwargs.pop('index' ,False )
        written = self._write(index=index ,**self.to_sql_kwargs )
        return written
    def _batch_sql( self ,args ):
        offset , index , to_sql_kwargs = args
        to_sql_kwargs = {**to_sql_kwargs, 'if_exists': 'append'} if offset > 0 else to_sql_kwargs
        batch = query_table(
            table=self.dataset.data ,key=slice(offset ,offset + self.batch_size ) ,indices=self.dataset._indices ,)
        df = batch.to_pandas()
        num_rows = df.to_sql(self.name ,self.con ,index=index ,**to_sql_kwargs )
        return num_rows or len(df )
    def _write( self ,index ,**to_sql_kwargs ):
        written = 0
        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0 ,len(self.dataset ) ,self.batch_size ) ,unit='ba' ,disable=not logging.is_progress_bar_enabled() ,desc='Creating SQL from Arrow format' ,):
                written += self._batch_sql((offset, index, to_sql_kwargs) )
        else:
            num_rows , batch_size = len(self.dataset ), self.batch_size
            with multiprocessing.Pool(self.num_proc ) as pool:
                for num_rows in logging.tqdm(
                    pool.imap(
                        self._batch_sql ,[(offset, index, to_sql_kwargs) for offset in range(0 ,num_rows ,batch_size )] ,) ,total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size ,unit='ba' ,disable=not logging.is_progress_bar_enabled() ,desc='Creating SQL from Arrow format' ,):
written += num_rows
return written
| 89 |
"""simple docstring"""
import torch
from transformers import CamembertForMaskedLM, CamembertTokenizer
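# Minimal masked-LM demo: score the top-k candidate fills for a single <mask> position.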
def fill_mask( masked_input, model, tokenizer, topk=5 ):
"""simple docstring"""
assert masked_input.count('''<mask>''' ) == 1
    input_ids = torch.tensor(tokenizer.encode(masked_input, add_special_tokens=True ) ).unsqueeze(0 )  # Batch size 1
    logits = model(input_ids )[0]  # The last hidden-state is the first element of the output tuple
    masked_index = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
    logits = logits[0, masked_index, :]
    prob = logits.softmax(dim=0 )
    values , indices = prob.topk(k=topk , dim=0 )
    topk_predicted_token_bpe = ''' '''.join(
        [tokenizer.convert_ids_to_tokens(indices[i].item() ) for i in range(len(indices ) )] )
    masked_token = tokenizer.mask_token
    topk_filled_outputs = []
    for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(''' ''' ) ):
        predicted_token = predicted_token_bpe.replace('''\u2581''', ''' ''' )
        if " {0}".format(masked_token ) in masked_input:
            topk_filled_outputs.append(
                (
                    masked_input.replace(''' {0}'''.format(masked_token ), predicted_token ),
                    values[index].item(),
                    predicted_token,
                ) )
        else:
            topk_filled_outputs.append(
                (
                    masked_input.replace(masked_token, predicted_token ),
                    values[index].item(),
                    predicted_token,
                ) )
    return topk_filled_outputs
tokenizer = CamembertTokenizer.from_pretrained("""camembert-base""")
model = CamembertForMaskedLM.from_pretrained("""camembert-base""")
model.eval()
masked_input = """Le camembert est <mask> :)"""
print(fill_mask(masked_input, model, tokenizer, topk=3))
| 194 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__SCREAMING_SNAKE_CASE : List[Any] = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : Tuple = {
'google/fnet-base': 'https://huggingface.co/google/fnet-base/resolve/main/config.json',
'google/fnet-large': 'https://huggingface.co/google/fnet-large/resolve/main/config.json'
# See all FNet models at https://huggingface.co/models?filter=fnet
}
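# MRA approximates full self-attention with a block-sparse multi-resolution scheme;
# block_per_row and approx_mode below control that approximation.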
class FNetConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = """fnet"""
    def __init__( self , vocab_size=32_000 , hidden_size=768 , num_hidden_layers=12 , intermediate_size=3_072 , hidden_act="gelu_new" , hidden_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=4 , initializer_range=0.02 , layer_norm_eps=1E-12 , use_tpu_fourier_optimizations=False , tpu_short_seq_length=512 , pad_token_id=3 , bos_token_id=1 , eos_token_id=2 , **kwargs , ) ->None:
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_tpu_fourier_optimizations = use_tpu_fourier_optimizations
        self.tpu_short_seq_length = tpu_short_seq_length
| 233 |
"""simple docstring"""
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
__SCREAMING_SNAKE_CASE : Tuple = [
'cross_validation.py',
'gradient_accumulation.py',
'local_sgd.py',
'multi_process_metrics.py',
'memory.py',
'automatic_gradient_accumulation.py',
'fsdp_with_peak_mem_tracking.py',
'deepspeed_with_config_support.py',
'megatron_lm_gpt_pretraining.py',
]
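# Scripts listed above are skipped by the feature-parity diff checks below.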
class __A (unittest.TestCase):
'''simple docstring'''
    def one_complete_example( self , complete_file_name: str , parser_only: bool , secondary_filename: str = None , special_strings: list = None ) ->None:
        """simple docstring"""
        self.maxDiff = None
        by_feature_path = os.path.abspath(os.path.join("""examples""" , """by_feature""" ) )
        examples_path = os.path.abspath("""examples""" )
        for item in os.listdir(by_feature_path ):
            if item not in EXCLUDE_EXAMPLES:
                item_path = os.path.join(by_feature_path , item )
                if os.path.isfile(item_path ) and ".py" in item_path:
                    with self.subTest(
                        tested_script=complete_file_name , feature_script=item , tested_section="""main()""" if parser_only else """training_function()""" , ):
                        diff = compare_against_test(
                            os.path.join(examples_path , complete_file_name ) , item_path , secondary_filename , special_strings )
                        diff = """\n""".join(diff )
                        if special_strings is not None:
                            for string in special_strings:
                                diff = diff.replace(string , """""" )
                        self.assertEqual(diff , """""" )
    def test_nlp_examples( self ):
        """simple docstring"""
        self.one_complete_example("""complete_nlp_example.py""" , True )
        self.one_complete_example("""complete_nlp_example.py""" , False )
    def test_cv_examples( self ):
        """simple docstring"""
        cv_path = os.path.abspath(os.path.join("""examples""" , """cv_example.py""" ) )
        special_strings = [
""" """ * 16 + """{\n\n""",
""" """ * 20 + """\"accuracy\": eval_metric[\"accuracy\"],\n\n""",
""" """ * 20 + """\"f1\": eval_metric[\"f1\"],\n\n""",
""" """ * 20 + """\"train_loss\": total_loss.item() / len(train_dataloader),\n\n""",
""" """ * 20 + """\"epoch\": epoch,\n\n""",
""" """ * 16 + """},\n\n""",
""" """ * 16 + """step=epoch,\n""",
""" """ * 12,
""" """ * 8 + """for step, batch in enumerate(active_dataloader):\n""",
]
self.one_complete_example("""complete_cv_example.py""" , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
self.one_complete_example("""complete_cv_example.py""" , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
@mock.patch.dict(os.environ , {"""TESTING_MOCKED_DATALOADERS""": """1"""})
class __A (TempDirTestCase):
'''simple docstring'''
__lowercase: str = False
@classmethod
def lowerCAmelCase ( cls : Any ) ->List[str]:
"""simple docstring"""
super().setUpClass()
        cls._tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls._tmpdir , """default_config.yml""" )
        write_basic_config(save_location=cls.configPath )
        cls._launch_args = ["""accelerate""", """launch""", """--config_file""", cls.configPath]
@classmethod
def lowerCAmelCase ( cls : List[str] ) ->int:
"""simple docstring"""
super().tearDownClass()
shutil.rmtree(cls._tmpdir )
    def test_checkpointing_by_epoch( self ):
        """simple docstring"""
        testargs = F"""
examples/by_feature/checkpointing.py
--checkpointing_steps epoch
--output_dir {self.tmpdir}
""".split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , """epoch_0""" ) ) )
    def test_checkpointing_by_steps( self ):
        """simple docstring"""
        testargs = F"""
examples/by_feature/checkpointing.py
--checkpointing_steps 1
--output_dir {self.tmpdir}
""".split()
snake_case_ = run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , """step_2""" ) ) )
    def test_load_states_by_epoch( self ):
        """simple docstring"""
        testargs = F"""
examples/by_feature/checkpointing.py
--resume_from_checkpoint {os.path.join(self.tmpdir , "epoch_0" )}
""".split()
        output = run_command(self._launch_args + testargs , return_stdout=True )
        self.assertNotIn("""epoch 0:""" , output )
        self.assertIn("""epoch 1:""" , output )
    def test_load_states_by_steps( self ):
        """simple docstring"""
        testargs = F"""
examples/by_feature/checkpointing.py
--resume_from_checkpoint {os.path.join(self.tmpdir , "step_2" )}
""".split()
        output = run_command(self._launch_args + testargs , return_stdout=True )
        if torch.cuda.is_available():
            num_processes = torch.cuda.device_count()
        else:
            num_processes = 1
        if num_processes > 1:
            self.assertNotIn("""epoch 0:""" , output )
            self.assertIn("""epoch 1:""" , output )
        else:
            self.assertIn("""epoch 0:""" , output )
            self.assertIn("""epoch 1:""" , output )
@slow
    def test_cross_validation( self ):
        """simple docstring"""
        testargs = """
examples/by_feature/cross_validation.py
--num_folds 2
""".split()
with mock.patch.dict(os.environ , {"""TESTING_MOCKED_DATALOADERS""": """0"""} ):
            output = run_command(self._launch_args + testargs , return_stdout=True )
            results = re.findall("""({.+})""" , output )
            results = [r for r in results if """accuracy""" in r][-1]
            results = ast.literal_eval(results )
        self.assertGreaterEqual(results["""accuracy"""] , 0.75 )
    def test_multi_process_metrics( self ):
        """simple docstring"""
        testargs = ["""examples/by_feature/multi_process_metrics.py"""]
run_command(self._launch_args + testargs )
@require_trackers
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
    def test_tracking( self ):
        """simple docstring"""
        with tempfile.TemporaryDirectory() as tmpdir:
            testargs = F"""
examples/by_feature/tracking.py
--with_tracking
--project_dir {tmpdir}
""".split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(UpperCAmelCase_ , """tracking""" ) ) )
    def test_gradient_accumulation( self ):
        """simple docstring"""
        testargs = ["""examples/by_feature/gradient_accumulation.py"""]
run_command(self._launch_args + testargs )
    def test_local_sgd( self ):
        """simple docstring"""
        testargs = ["""examples/by_feature/local_sgd.py"""]
run_command(self._launch_args + testargs )
| 233 | 1 |
import argparse
from copy import deepcopy
import numpy as np
from datasets import ClassLabel, DatasetDict, load_dataset
from evaluate import load
from transformers import (
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
Trainer,
TrainerCallback,
TrainingArguments,
set_seed,
)
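# Fine-tunes a sequence classifier on codeparrot/codecomplex to predict one of 7 complexity classes.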
def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument('''--model_ckpt''' , type=str , default='''microsoft/unixcoder-base-nine''' )
    parser.add_argument('''--num_epochs''' , type=int , default=5 )
    parser.add_argument('''--batch_size''' , type=int , default=6 )
    parser.add_argument('''--gradient_accumulation_steps''' , type=int , default=1 )
    parser.add_argument('''--freeze''' , type=bool , default=True )
    parser.add_argument('''--learning_rate''' , type=float , default=5E-4 )
    parser.add_argument('''--seed''' , type=int , default=0 )
    parser.add_argument('''--lr_scheduler_type''' , type=str , default='''cosine''' )
    parser.add_argument('''--num_warmup_steps''' , type=int , default=10 )
    parser.add_argument('''--weight_decay''' , type=float , default=0.01 )
    parser.add_argument('''--output_dir''' , type=str , default='''./results''' )
    return parser.parse_args()
__lowerCAmelCase : Any = load("accuracy")
def compute_metrics( eval_pred ):
    logits , labels = eval_pred
    predictions = np.argmax(logits , axis=1 )
    return metric.compute(predictions=predictions , references=labels )
class CustomCallback( TrainerCallback ):
    """simple docstring"""
    def __init__( self , trainer ):
        super().__init__()
        self._trainer = trainer
    def on_epoch_end( self , args , state , control , **kwargs ):
        if control.should_evaluate:
            control_copy = deepcopy(control )
            self._trainer.evaluate(eval_dataset=self._trainer.train_dataset , metric_key_prefix='''train''' )
            return control_copy
def main():
    args = get_args()
    set_seed(args.seed )
    dataset = load_dataset('''codeparrot/codecomplex''' , split='''train''' )
    train_test = dataset.train_test_split(test_size=0.2 )
    test_validation = train_test['''test'''].train_test_split(test_size=0.5 )
    train_test_validation = DatasetDict(
        {
            '''train''': train_test['''train'''],
            '''test''': test_validation['''train'''],
            '''valid''': test_validation['''test'''],
        } )
    print('''Loading tokenizer and model''' )
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt )
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt , num_labels=7 )
    model.config.pad_token_id = model.config.eos_token_id
    if args.freeze:
        for param in model.roberta.parameters():
            param.requires_grad = False
    labels = ClassLabel(num_classes=7 , names=list(set(train_test_validation['''train''']['''complexity'''] ) ) )
    def tokenize(example ):
        inputs = tokenizer(example['''src'''] , truncation=True , max_length=1_024 )
        label = labels.str2int(example['''complexity'''] )
        return {
            "input_ids": inputs["input_ids"],
            "attention_mask": inputs["attention_mask"],
            "label": label,
        }
    tokenized_datasets = train_test_validation.map(
        tokenize , batched=True , remove_columns=train_test_validation['''train'''].column_names , )
    data_collator = DataCollatorWithPadding(tokenizer=tokenizer )
    training_args = TrainingArguments(
        output_dir=args.output_dir , learning_rate=args.learning_rate , lr_scheduler_type=args.lr_scheduler_type , evaluation_strategy='''epoch''' , save_strategy='''epoch''' , logging_strategy='''epoch''' , per_device_train_batch_size=args.batch_size , per_device_eval_batch_size=args.batch_size , num_train_epochs=args.num_epochs , gradient_accumulation_steps=args.gradient_accumulation_steps , weight_decay=0.01 , metric_for_best_model='''accuracy''' , run_name='''complexity-java''' , report_to='''wandb''' , )
    trainer = Trainer(
        model=model , args=training_args , train_dataset=tokenized_datasets['''train'''] , eval_dataset=tokenized_datasets['''valid'''] , tokenizer=tokenizer , data_collator=data_collator , compute_metrics=compute_metrics , )
    print('''Training...''' )
    trainer.add_callback(CustomCallback(trainer ) )
    trainer.train()
if __name__ == "__main__":
main()
| 156 |
from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import GPTaTokenizer, TFOPTForCausalLM, TFOPTModel
def prepare_opt_inputs_dict( config , input_ids , attention_mask=None , head_mask=None ):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids , config.pad_token_id ) , tf.int8 )
    return {"input_ids": input_ids, "attention_mask": attention_mask}
@require_tf
class TFOPTModelTester:
    """simple docstring"""
    config_cls = OPTConfig
    config_updates = {}
    hidden_act = '''gelu'''
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_labels=False , vocab_size=99 , hidden_size=16 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=4 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=20 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , embed_dim=16 , word_embed_proj_dim=16 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.embed_dim = embed_dim
        self.word_embed_proj_dim = word_embed_proj_dim
        self.is_encoder_decoder = False
    def prepare_config_and_inputs_for_common( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
        input_ids = tf.concat([input_ids, eos_tensor] , axis=1 )
        config = self.config_cls(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , embed_dim=self.embed_dim , word_embed_proj_dim=self.word_embed_proj_dim , is_encoder_decoder=False , **self.config_updates , )
        inputs_dict = prepare_opt_inputs_dict(config , input_ids )
        return config, inputs_dict
    def check_decoder_model_past_large_inputs( self , config , inputs_dict ):
        model = TFOPTModel(config=config )
        input_ids = inputs_dict['''input_ids''']
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict['''attention_mask'''][:1, :]
        self.batch_size = 1
        # first forward pass
        outputs = model(input_ids , attention_mask=attention_mask , use_cache=True )
        output , past_key_values = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3) , config.vocab_size )
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.int8 )
        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens] , axis=-1 )
        next_attention_mask = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
        output_from_no_past = model(next_input_ids , attention_mask=next_attention_mask )[0]
        output_from_past = model(next_tokens , attention_mask=next_attention_mask , past_key_values=past_key_values )[0]
        self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
        # select random slice
        random_slice_idx = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice , output_from_past_slice , rtol=1E-3 )
@require_tf
class TFOPTModelTest( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
    all_model_classes = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFOPTForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {'''feature-extraction''': TFOPTModel, '''text-generation''': TFOPTForCausalLM} if is_tf_available() else {}
    )
    is_encoder_decoder = False
    test_pruning = False
    test_onnx = False
    onnx_min_opset = 1_0
    def setUp( self ):
        self.model_tester = TFOPTModelTester(self )
        self.config_tester = ConfigTester(self , config_class=OPTConfig )
    def test_config( self ):
        self.config_tester.run_common_tests()
    def test_decoder_model_past_large_inputs( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs )
def snake_case_ ( self : List[Any] ):
__lowercase , __lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
def _get_word_embedding_weight(_snake_case : Optional[int] , _snake_case : Dict ):
if hasattr(_snake_case , '''weight''' ):
return embedding_layer.weight
else:
# Here we build the word embeddings weights if not exists.
# And then we retry to get the attribute once built.
model.build()
if hasattr(_snake_case , '''weight''' ):
return embedding_layer.weight
else:
return None
for model_class in self.all_model_classes:
for size in [config.vocab_size - 10, config.vocab_size + 10]:
# build the embeddings
__lowercase : Union[str, Any] = model_class(config=_snake_case )
__lowercase : int = _get_word_embedding_weight(_snake_case , model.get_input_embeddings() )
__lowercase : str = _get_word_embedding_weight(_snake_case , model.get_output_embeddings() )
# reshape the embeddings
model.resize_token_embeddings(_snake_case )
__lowercase : Dict = _get_word_embedding_weight(_snake_case , model.get_input_embeddings() )
__lowercase : Tuple = _get_word_embedding_weight(_snake_case , model.get_output_embeddings() )
# check that the resized embeddings size matches the desired size.
__lowercase : List[str] = size if size is not None else config.vocab_size
self.assertEqual(new_input_embeddings.shape[0] , _snake_case )
# check that weights remain the same after resizing
__lowercase : str = True
for pa, pa in zip(old_input_embeddings.value() , new_input_embeddings.value() ):
if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0:
__lowercase : str = False
self.assertTrue(_snake_case )
if old_output_embeddings is not None and new_output_embeddings is not None:
self.assertEqual(new_output_embeddings.shape[0] , _snake_case )
__lowercase : Optional[Any] = True
for pa, pa in zip(old_output_embeddings.value() , new_output_embeddings.value() ):
if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0:
__lowercase : Optional[Any] = False
self.assertTrue(_snake_case )
def UpperCAmelCase_ ( __lowerCAmelCase ) -> Dict:
return tf.constant(__lowerCAmelCase , dtype=tf.intaa )
@require_tf
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
A__ : Optional[Any] = 9_9
def snake_case_ ( self : List[Any] ):
__lowercase : List[Any] = tf.ones((4, 1) , dtype=tf.intaa ) * 2
__lowercase : Tuple = tf.concat([ids_tensor((4, 6) , self.vocab_size - 3 ) + 3, eos_column_vector] , axis=1 )
__lowercase : List[str] = input_ids.shape[0]
__lowercase : int = OPTConfig(
vocab_size=self.vocab_size , hidden_size=24 , num_hidden_layers=2 , num_attention_heads=2 , ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
@require_sentencepiece
@require_tf
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def snake_case_ ( self : Tuple ):
__lowercase : int = TFOPTModel.from_pretrained('''facebook/opt-350m''' )
__lowercase : Optional[int] = _long_tensor([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]] )
__lowercase : int = tf.not_equal(_snake_case , model.config.pad_token_id )
with tf.GradientTape():
__lowercase : Optional[int] = model(input_ids=_snake_case , attention_mask=_snake_case ).last_hidden_state
__lowercase : Optional[Any] = (1, 11, 512)
self.assertEqual(output.shape , _snake_case )
__lowercase : Tuple = tf.constant(
[[-0.28_73, -1.92_18, -0.30_33], [-1.27_10, -0.13_38, -0.19_02], [0.40_95, 0.12_14, -1.31_21]] )
self.assertTrue(np.allclose(output[:, :3, :3] , _snake_case , atol=4E-3 ) )
__lowercase : Union[str, Any] = tf.function(_snake_case , jit_compile=_snake_case )
__lowercase : Dict = xla_generate(_snake_case , _snake_case )[0]
self.assertTrue(np.allclose(output[:, :3, :3] , _snake_case , atol=4E-2 ) )
@require_tf
@slow
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def snake_case_ ( self : Optional[Any] ):
super().setUp()
__lowercase : Tuple = '''facebook/opt-350m'''
def snake_case_ ( self : int ):
__lowercase : Any = TFOPTForCausalLM.from_pretrained(self.path_model )
__lowercase : Dict = GPTaTokenizer.from_pretrained(self.path_model )
__lowercase : Union[str, Any] = [
'''Today is a beautiful day and I want to''',
'''In the city of''',
'''Paris is the capital of France and''',
'''Computers and mobile phones have taken''',
]
# verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
__lowercase : int = tokenizer(_snake_case , return_tensors='''tf''' , padding=_snake_case , add_special_tokens=_snake_case )
__lowercase : str = tf.math.reduce_mean(model(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
__lowercase : str = tf.constant(
[
[1.38_51, -13.89_23, -10.52_29, -10.75_33, -0.23_09, -10.23_84, -0.53_65, -9.09_47, -5.16_70],
[-4.70_73, -10.62_76, -3.94_15, -21.52_42, -0.28_22, -0.28_22, -0.28_22, -0.28_22, -0.28_22],
[0.62_47, -3.42_29, -8.91_79, -1.42_97, -14.16_50, 1.41_46, -9.02_18, -0.27_03, -0.27_03],
[6.47_83, -1.99_13, -10.79_26, -2.33_36, 1.50_92, -0.99_74, -6.82_13, 1.34_77, 1.34_77],
] )
self.assertTrue(np.allclose(_snake_case , _snake_case , atol=1E-4 ) )
__lowercase : Any = tf.function(_snake_case , jit_compile=_snake_case )
__lowercase : List[str] = tf.math.reduce_mean(xla_generate(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
self.assertTrue(np.allclose(_snake_case , _snake_case , atol=1E-4 ) )
@require_tf
@slow
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@property
def snake_case_ ( self : str ):
return [
"Today is a beautiful day and I want",
"In the city of",
"Paris is the capital of France and",
"Computers and mobile phones have taken",
]
def snake_case_ ( self : List[Any] ):
__lowercase : Optional[Any] = '''facebook/opt-125m'''
__lowercase : int = [
'''Today is a beautiful day and I want to''',
'''In the city of New York, the city''',
'''Paris is the capital of France and the capital''',
'''Computers and mobile phones have taken over the''',
]
__lowercase : Dict = []
__lowercase : Optional[int] = GPTaTokenizer.from_pretrained(_snake_case )
__lowercase : List[str] = TFOPTForCausalLM.from_pretrained(_snake_case )
for prompt in self.prompts:
__lowercase : List[Any] = tokenizer(_snake_case , return_tensors='''tf''' ).input_ids
__lowercase : int = model.generate(_snake_case , max_length=10 )
__lowercase : Union[str, Any] = tokenizer.batch_decode(_snake_case , skip_special_tokens=_snake_case )
predicted_outputs += generated_string
self.assertListEqual(_snake_case , _snake_case )
def snake_case_ ( self : Union[str, Any] ):
__lowercase : List[str] = '''facebook/opt-350m'''
__lowercase : List[Any] = GPTaTokenizer.from_pretrained(_snake_case )
__lowercase : Tuple = TFOPTForCausalLM.from_pretrained(_snake_case )
__lowercase : List[str] = '''left'''
# use different length sentences to test batching
__lowercase : str = [
'''Hello, my dog is a little''',
'''Today, I''',
]
__lowercase : List[str] = tokenizer(_snake_case , return_tensors='''tf''' , padding=_snake_case )
__lowercase : Optional[Any] = inputs['''input_ids''']
__lowercase : List[str] = model.generate(input_ids=_snake_case , attention_mask=inputs['''attention_mask'''] )
__lowercase : List[str] = tokenizer(sentences[0] , return_tensors='''tf''' ).input_ids
__lowercase : int = model.generate(input_ids=_snake_case )
__lowercase : Optional[Any] = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
tf.cast(inputs['''attention_mask'''][-1] , tf.intaa ) )
__lowercase : Optional[int] = tokenizer(sentences[1] , return_tensors='''tf''' ).input_ids
__lowercase : Optional[Any] = model.generate(input_ids=_snake_case , max_length=model.config.max_length - num_paddings )
__lowercase : str = tokenizer.batch_decode(_snake_case , skip_special_tokens=_snake_case )
__lowercase : Union[str, Any] = tokenizer.decode(output_non_padded[0] , skip_special_tokens=_snake_case )
__lowercase : Tuple = tokenizer.decode(output_padded[0] , skip_special_tokens=_snake_case )
__lowercase : List[Any] = [
'''Hello, my dog is a little bit of a dork.\nI\'m a little bit''',
'''Today, I was in the middle of a conversation with a friend about the''',
]
self.assertListEqual(_snake_case , _snake_case )
self.assertListEqual(_snake_case , [non_padded_sentence, padded_sentence] )
def snake_case_ ( self : Union[str, Any] ):
__lowercase : List[Any] = '''facebook/opt-350m'''
__lowercase : str = [
'''Today is a beautiful day and I want to''',
'''In the city of San Francisco, the city''',
'''Paris is the capital of France and the capital''',
'''Computers and mobile phones have taken over the''',
]
__lowercase : Union[str, Any] = []
__lowercase : List[str] = GPTaTokenizer.from_pretrained(_snake_case )
__lowercase : List[str] = TFOPTForCausalLM.from_pretrained(_snake_case )
for prompt in self.prompts:
__lowercase : Union[str, Any] = tokenizer(_snake_case , return_tensors='''tf''' ).input_ids
__lowercase : List[str] = model.generate(_snake_case , max_length=10 )
__lowercase : Tuple = tokenizer.batch_decode(_snake_case , skip_special_tokens=_snake_case )
predicted_outputs += generated_string
self.assertListEqual(_snake_case , _snake_case )
| 156 | 1 |
from math import factorial
def __lowerCamelCase ( lowerCamelCase__ : int , lowerCamelCase__ : int ):
'''simple docstring'''
if n < k or k < 0:
raise ValueError("""Please enter positive integers for n and k where n >= k""" )
return factorial(lowerCamelCase__ ) // (factorial(lowerCamelCase__ ) * factorial(n - k ))
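# combinations(n, k) = n! / (k! * (n - k)!), the binomial coefficient; e.g. combinations(5, 2) == 10.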
if __name__ == "__main__":
print(
"The number of five-card hands possible from a standard",
f"""fifty-two card deck is: {combinations(52, 5)}\n""",
)
print(
"If a class of 40 students must be arranged into groups of",
f"""4 for group projects, there are {combinations(40, 4)} ways""",
"to arrange them.\n",
)
print(
"If 10 teams are competing in a Formula One race, there",
f"""are {combinations(10, 3)} ways that first, second and""",
"third place can be awarded.",
)
| 353 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"TsinghuaAI/CPM-Generate": "https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model",
}
}
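# CPM pre-tokenizes Chinese text with jieba and maps space/newline to U+2582/U+2583 so they
# survive SentencePiece; _decode() reverses the mapping.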
class CpmTokenizer( PreTrainedTokenizer ):
"""simple docstring"""
    def __init__( self , vocab_file , do_lower_case=False , remove_space=True , keep_accents=False , bos_token="<s>" , eos_token="</s>" , unk_token="<unk>" , sep_token="<sep>" , pad_token="<pad>" , cls_token="<cls>" , mask_token="<mask>" , additional_special_tokens=["<eop>", "<eod>"] , sp_model_kwargs = None , **kwargs , ) -> None:
        '''simple docstring'''
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case , remove_space=remove_space , keep_accents=keep_accents , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , additional_special_tokens=additional_special_tokens , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(vocab_file )
        try:
            import jieba
        except ModuleNotFoundError as error:
            raise error.__class__(
                """You need to install jieba to use CpmTokenizer or CpmTokenizerFast. """
                """See https://pypi.org/project/jieba/ for installation.""" )
        self.jieba = jieba
        self.translator = str.maketrans(""" \n""" , """\u2582\u2583""" )
@property
# Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
    def vocab_size( self ) -> int:
        '''simple docstring'''
        return len(self.sp_model )
    def get_vocab( self ) -> Union[str, Any]:
        '''simple docstring'''
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
def __getstate__( self ) -> Dict:
'''simple docstring'''
        state = self.__dict__.copy()
        state["""sp_model"""] = None
return state
    def __setstate__( self , d ) -> None:
        '''simple docstring'''
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , """sp_model_kwargs""" ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
    def preprocess_text( self , inputs ) -> Any:
        '''simple docstring'''
        if self.remove_space:
            outputs = """ """.join(inputs.strip().split() )
        else:
            outputs = inputs
        outputs = outputs.replace("""``""" , """\"""" ).replace("""''""" , """\"""" )
        if not self.keep_accents:
            outputs = unicodedata.normalize("""NFKD""" , outputs )
            outputs = """""".join([c for c in outputs if not unicodedata.combining(c )] )
        if self.do_lower_case:
            outputs = outputs.lower()
        return outputs
    def _tokenize( self , text ) -> List[str]:
        '''simple docstring'''
        text = self.preprocess_text(text )
        pieces = self.sp_model.encode(text , out_type=str )
        new_pieces = []
        for piece in pieces:
            if len(piece ) > 1 and piece[-1] == str(""",""" ) and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE , """""" ) )
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0] ) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1] )
                new_pieces.extend(cur_pieces )
            else:
                new_pieces.append(piece )
        return new_pieces
    def _convert_token_to_id( self , token ):
        '''simple docstring'''
        return self.sp_model.PieceToId(token )
    def _convert_id_to_token( self , index ):
        '''simple docstring'''
        return self.sp_model.IdToPiece(index )
    def convert_tokens_to_string( self , tokens ):
        '''simple docstring'''
        out_string = """""".join(tokens ).replace(SPIECE_UNDERLINE , """ """ ).strip()
        return out_string
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls
    def get_special_tokens_mask( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ) -> List[int]:
        '''simple docstring'''
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        if token_ids_1 is not None:
            return ([0] * len(token_ids_0 )) + [1] + ([0] * len(token_ids_1 )) + [1, 1]
        return ([0] * len(token_ids_0 )) + [1, 1]
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_1 is None:
            return len(token_ids_0 + sep ) * [0] + cls_segment_id
        return len(token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1] + cls_segment_id
    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        '''simple docstring'''
        if not os.path.isdir(save_directory ):
            logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , """wb""" ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
    def _decode( self , *args , **kwargs ):
        '''simple docstring'''
        text = super()._decode(*args , **kwargs )
        text = text.replace(""" """ , """""" ).replace("""\u2582""" , """ """ ).replace("""\u2583""" , """\n""" )
return text
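# Added sketch (not part of the original tokenizer): a minimal, self-contained
# illustration of the SentencePiece round-trip the methods above implement.
# "spiece.model" is a hypothetical placeholder path, not a file shipped here.
#
#   import sentencepiece as spm
#   sp = spm.SentencePieceProcessor()
#   sp.Load("spiece.model")                                  # hypothetical model file
#   pieces = sp.encode("Hello world", out_type=str)          # subword pieces
#   text = "".join(pieces).replace("\u2581", " ").strip()    # U+2581 is SPIECE_UNDERLINE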
| 66 | 0 |
"""simple docstring"""
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class ASTModelTester:
    def __init__( self , parent , batch_size=13 , patch_size=2 , max_length=24 , num_mel_bins=16 , is_training=True , use_labels=True , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=10 , initializer_range=0.02 , scope=None , frequency_stride=2 , time_stride=2 , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.patch_size = patch_size
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        # in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        frequency_out_dimension = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
        time_out_dimension = (self.max_length - self.patch_size) // self.time_stride + 1
        num_patches = frequency_out_dimension * time_out_dimension
        self.seq_length = num_patches + 2
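        # Sanity check (added sketch) with the defaults above:
        # (16 - 2) // 2 + 1 = 8 frequency positions and (24 - 2) // 2 + 1 = 12
        # time positions, so num_patches = 96 and self.seq_length = 98 once the
        # [CLS] and distillation tokens are counted.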
    def prepare_config_and_inputs( self ):
        """simple docstring"""
        input_values = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = self.get_config()
        return config, input_values, labels
    def get_config( self ):
        """simple docstring"""
        # is_decoder=False below is an assumption; the obfuscated source only
        # showed an undefined placeholder in that position.
        return ASTConfig(
            patch_size=self.patch_size , max_length=self.max_length , num_mel_bins=self.num_mel_bins , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , frequency_stride=self.frequency_stride , time_stride=self.time_stride , )
    def create_and_check_model( self , config , input_values , labels ):
        """simple docstring"""
        model = ASTModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_values )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def prepare_config_and_inputs_for_common( self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_values, labels) = config_and_inputs
        inputs_dict = {"input_values": input_values}
        return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (
            ASTModel,
            ASTForAudioClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"audio-classification": ASTForAudioClassification, "feature-extraction": ASTModel}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def is_pipeline_test_to_skip(
        self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ):
        """simple docstring"""
        if pipeline_test_casse_name == "AudioClassificationPipelineTests":
            return True
        return False
    def setUp( self ):
        """simple docstring"""
        self.model_tester = ASTModelTester(self )
        self.config_tester = ConfigTester(self , config_class=ASTConfig , has_text_modality=False , hidden_size=37 )
    def test_config( self ):
        """simple docstring"""
        self.config_tester.run_common_tests()
@unittest.skip(reason="""AST does not use inputs_embeds""")
    def test_inputs_embeds( self ):
"""simple docstring"""
pass
    def test_model_common_attributes( self ):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , nn.Linear) )
    def test_forward_signature( self ):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["input_values"]
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_model( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
@slow
    def test_model_from_pretrained( self ):
        """simple docstring"""
        for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ASTModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_audio() -> List[Any]:
    '''simple docstring'''
    filepath = hf_hub_download(
        repo_id="nielsr/audio-spectogram-transformer-checkpoint" , filename="sample_audio.flac" , repo_type="dataset" )
    audio, sampling_rate = torchaudio.load(filepath )
    return audio, sampling_rate
@require_torch
@require_torchaudio
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
@cached_property
    def default_feature_extractor( self ):
"""simple docstring"""
return (
ASTFeatureExtractor.from_pretrained("""MIT/ast-finetuned-audioset-10-10-0.4593""")
if is_torchaudio_available()
else None
)
@slow
    def test_inference_audio_classification( self ):
        """simple docstring"""
        feature_extractor = self.default_feature_extractor
        model = ASTForAudioClassification.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593" ).to(torch_device )
        audio, sampling_rate = prepare_audio()
        audio = audio.squeeze().numpy()
        inputs = feature_extractor(audio , sampling_rate=sampling_rate , return_tensors="pt" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 527) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([-0.8760, -7.0042, -8.6602] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 ) )
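        # Note (added): the 527-way head matches the AudioSet label ontology the
        # checkpoint above was fine-tuned on; the slice values are reference
        # outputs recorded from a prior run of this model.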
| 136 |
"""simple docstring"""
def solution(n = 1_00_00_00 ) -> int:
    '''simple docstring'''
    largest_number = 1
    pre_counter = 1
    counters = {1: 1}
    for inputa in range(2 , n ):
        counter = 0
        number = inputa
        while True:
            if number in counters:
                counter += counters[number]
                break
            if number % 2 == 0:
                number //= 2
                counter += 1
            else:
                number = (3 * number) + 1
                counter += 1
        if inputa not in counters:
            counters[inputa] = counter
        if counter > pre_counter:
            largest_number = inputa
            pre_counter = counter
    return largest_number
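# For reference (added): solution(1_000_000) should return 837799, the classic
# Project Euler #14 answer — the start below one million with the longest
# Collatz chain.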
if __name__ == "__main__":
print(solution(int(input().strip())))
| 136 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase : str = logging.get_logger(__name__)
lowercase : List[Any] = {
"google/realm-cc-news-pretrained-embedder": (
"https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json"
),
"google/realm-cc-news-pretrained-encoder": (
"https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json"
),
"google/realm-cc-news-pretrained-scorer": (
"https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json"
),
"google/realm-cc-news-pretrained-openqa": (
"https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/config.json"
),
"google/realm-orqa-nq-openqa": "https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json",
"google/realm-orqa-nq-reader": "https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json",
"google/realm-orqa-wq-openqa": "https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json",
"google/realm-orqa-wq-reader": "https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json",
# See all REALM models at https://huggingface.co/models?filter=realm
}
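# Hedged usage sketch (added; assumes the class below mirrors transformers'
# RealmConfig): a default instance serializes like any PretrainedConfig, e.g.
#   config = SCREAMING_SNAKE_CASE__()   # defaults defined below
#   config.to_json_string()             # includes model_type "realm"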
class SCREAMING_SNAKE_CASE__ ( PretrainedConfig ):
    """simple docstring"""
    model_type = "realm"
    def __init__( self , vocab_size=30522 , hidden_size=768 , retriever_proj_size=128 , num_hidden_layers=12 , num_attention_heads=12 , num_candidates=8 , intermediate_size=3072 , hidden_act="gelu_new" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , span_hidden_size=256 , max_span_width=10 , reader_layer_norm_eps=1e-3 , reader_beam_size=5 , reader_seq_len=320 , num_block_records=13353718 , searcher_beam_size=5000 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , **kwargs , ):
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        # Common config
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.retriever_proj_size = retriever_proj_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_candidates = num_candidates
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        # Reader config
        self.span_hidden_size = span_hidden_size
        self.max_span_width = max_span_width
        self.reader_layer_norm_eps = reader_layer_norm_eps
        self.reader_beam_size = reader_beam_size
        self.reader_seq_len = reader_seq_len
        # Retrieval config
        self.num_block_records = num_block_records
        self.searcher_beam_size = searcher_beam_size
| 357 |
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
FPaRecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
is_abit_bnb_available,
is_aim_available,
is_bfaa_available,
is_bnb_available,
is_botoa_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_fpa_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
convert_outputs_to_fpaa,
convert_to_fpaa,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_abit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
TaTrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
| 171 | 0 |
"""simple docstring"""
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class lowerCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
    def setUp( self ):
        '''simple docstring'''
        mod_file = inspect.getfile(accelerate.test_utils )
        self.test_file_path = os.path.sep.join(
            mod_file.split(os.path.sep )[:-1] + ["scripts", "external_deps", "test_metrics.py"] )
        from accelerate.test_utils.scripts.external_deps import test_metrics  # noqa: F401
        self.test_metrics = test_metrics
@require_cpu
    def test_metric_cpu_noop( self ):
'''simple docstring'''
debug_launcher(self.test_metrics.main , num_processes=1 )
@require_cpu
    def test_metric_cpu_multi( self ):
'''simple docstring'''
debug_launcher(self.test_metrics.main )
@require_single_gpu
    def test_metric_gpu( self ):
'''simple docstring'''
self.test_metrics.main()
@require_multi_gpu
    def test_metric_gpu_multi( self ):
        '''simple docstring'''
        print(f'''Found {torch.cuda.device_count()} devices.''' )
        cmd = ["torchrun", f'''--nproc_per_node={torch.cuda.device_count()}''', self.test_file_path]
        with patch_environment(omp_num_threads=1 ):
            execute_subprocess_async(cmd , env=os.environ.copy() )
| 115 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
UpperCAmelCase : List[Any] = {
'Acehnese Arabic': 'ace_Arab',
'Acehnese Latin': 'ace_Latn',
'Mesopotamian Arabic': 'acm_Arab',
'Ta\'izzi-Adeni Arabic': 'acq_Arab',
'Tunisian Arabic': 'aeb_Arab',
'Afrikaans': 'afr_Latn',
'South Levantine Arabic': 'ajp_Arab',
'Akan': 'aka_Latn',
'Amharic': 'amh_Ethi',
'North Levantine Arabic': 'apc_Arab',
'Modern Standard Arabic': 'arb_Arab',
'Modern Standard Arabic Romanized': 'arb_Latn',
'Najdi Arabic': 'ars_Arab',
'Moroccan Arabic': 'ary_Arab',
'Egyptian Arabic': 'arz_Arab',
'Assamese': 'asm_Beng',
'Asturian': 'ast_Latn',
'Awadhi': 'awa_Deva',
'Central Aymara': 'ayr_Latn',
'South Azerbaijani': 'azb_Arab',
'North Azerbaijani': 'azj_Latn',
'Bashkir': 'bak_Cyrl',
'Bambara': 'bam_Latn',
'Balinese': 'ban_Latn',
'Belarusian': 'bel_Cyrl',
'Bemba': 'bem_Latn',
'Bengali': 'ben_Beng',
'Bhojpuri': 'bho_Deva',
'Banjar Arabic': 'bjn_Arab',
'Banjar Latin': 'bjn_Latn',
'Standard Tibetan': 'bod_Tibt',
'Bosnian': 'bos_Latn',
'Buginese': 'bug_Latn',
'Bulgarian': 'bul_Cyrl',
'Catalan': 'cat_Latn',
'Cebuano': 'ceb_Latn',
'Czech': 'ces_Latn',
'Chokwe': 'cjk_Latn',
'Central Kurdish': 'ckb_Arab',
'Crimean Tatar': 'crh_Latn',
'Welsh': 'cym_Latn',
'Danish': 'dan_Latn',
'German': 'deu_Latn',
'Southwestern Dinka': 'dik_Latn',
'Dyula': 'dyu_Latn',
'Dzongkha': 'dzo_Tibt',
'Greek': 'ell_Grek',
'English': 'eng_Latn',
'Esperanto': 'epo_Latn',
'Estonian': 'est_Latn',
'Basque': 'eus_Latn',
'Ewe': 'ewe_Latn',
'Faroese': 'fao_Latn',
'Fijian': 'fij_Latn',
'Finnish': 'fin_Latn',
'Fon': 'fon_Latn',
'French': 'fra_Latn',
'Friulian': 'fur_Latn',
'Nigerian Fulfulde': 'fuv_Latn',
'Scottish Gaelic': 'gla_Latn',
'Irish': 'gle_Latn',
'Galician': 'glg_Latn',
'Guarani': 'grn_Latn',
'Gujarati': 'guj_Gujr',
'Haitian Creole': 'hat_Latn',
'Hausa': 'hau_Latn',
'Hebrew': 'heb_Hebr',
'Hindi': 'hin_Deva',
'Chhattisgarhi': 'hne_Deva',
'Croatian': 'hrv_Latn',
'Hungarian': 'hun_Latn',
'Armenian': 'hye_Armn',
'Igbo': 'ibo_Latn',
'Ilocano': 'ilo_Latn',
'Indonesian': 'ind_Latn',
'Icelandic': 'isl_Latn',
'Italian': 'ita_Latn',
'Javanese': 'jav_Latn',
'Japanese': 'jpn_Jpan',
'Kabyle': 'kab_Latn',
'Jingpho': 'kac_Latn',
'Kamba': 'kam_Latn',
'Kannada': 'kan_Knda',
'Kashmiri Arabic': 'kas_Arab',
'Kashmiri Devanagari': 'kas_Deva',
'Georgian': 'kat_Geor',
'Central Kanuri Arabic': 'knc_Arab',
'Central Kanuri Latin': 'knc_Latn',
'Kazakh': 'kaz_Cyrl',
'Kabiyè': 'kbp_Latn',
'Kabuverdianu': 'kea_Latn',
'Khmer': 'khm_Khmr',
'Kikuyu': 'kik_Latn',
'Kinyarwanda': 'kin_Latn',
'Kyrgyz': 'kir_Cyrl',
'Kimbundu': 'kmb_Latn',
'Northern Kurdish': 'kmr_Latn',
'Kikongo': 'kon_Latn',
'Korean': 'kor_Hang',
'Lao': 'lao_Laoo',
'Ligurian': 'lij_Latn',
'Limburgish': 'lim_Latn',
'Lingala': 'lin_Latn',
'Lithuanian': 'lit_Latn',
'Lombard': 'lmo_Latn',
'Latgalian': 'ltg_Latn',
'Luxembourgish': 'ltz_Latn',
'Luba-Kasai': 'lua_Latn',
'Ganda': 'lug_Latn',
'Luo': 'luo_Latn',
'Mizo': 'lus_Latn',
'Standard Latvian': 'lvs_Latn',
'Magahi': 'mag_Deva',
'Maithili': 'mai_Deva',
'Malayalam': 'mal_Mlym',
'Marathi': 'mar_Deva',
'Minangkabau Arabic ': 'min_Arab',
'Minangkabau Latin': 'min_Latn',
'Macedonian': 'mkd_Cyrl',
'Plateau Malagasy': 'plt_Latn',
'Maltese': 'mlt_Latn',
'Meitei Bengali': 'mni_Beng',
'Halh Mongolian': 'khk_Cyrl',
'Mossi': 'mos_Latn',
'Maori': 'mri_Latn',
'Burmese': 'mya_Mymr',
'Dutch': 'nld_Latn',
'Norwegian Nynorsk': 'nno_Latn',
'Norwegian Bokmål': 'nob_Latn',
'Nepali': 'npi_Deva',
'Northern Sotho': 'nso_Latn',
'Nuer': 'nus_Latn',
'Nyanja': 'nya_Latn',
'Occitan': 'oci_Latn',
'West Central Oromo': 'gaz_Latn',
'Odia': 'ory_Orya',
'Pangasinan': 'pag_Latn',
'Eastern Panjabi': 'pan_Guru',
'Papiamento': 'pap_Latn',
'Western Persian': 'pes_Arab',
'Polish': 'pol_Latn',
'Portuguese': 'por_Latn',
'Dari': 'prs_Arab',
'Southern Pashto': 'pbt_Arab',
'Ayacucho Quechua': 'quy_Latn',
'Romanian': 'ron_Latn',
'Rundi': 'run_Latn',
'Russian': 'rus_Cyrl',
'Sango': 'sag_Latn',
'Sanskrit': 'san_Deva',
'Santali': 'sat_Olck',
'Sicilian': 'scn_Latn',
'Shan': 'shn_Mymr',
'Sinhala': 'sin_Sinh',
'Slovak': 'slk_Latn',
'Slovenian': 'slv_Latn',
'Samoan': 'smo_Latn',
'Shona': 'sna_Latn',
'Sindhi': 'snd_Arab',
'Somali': 'som_Latn',
'Southern Sotho': 'sot_Latn',
'Spanish': 'spa_Latn',
'Tosk Albanian': 'als_Latn',
'Sardinian': 'srd_Latn',
'Serbian': 'srp_Cyrl',
'Swati': 'ssw_Latn',
'Sundanese': 'sun_Latn',
'Swedish': 'swe_Latn',
'Swahili': 'swh_Latn',
'Silesian': 'szl_Latn',
'Tamil': 'tam_Taml',
'Tatar': 'tat_Cyrl',
'Telugu': 'tel_Telu',
'Tajik': 'tgk_Cyrl',
'Tagalog': 'tgl_Latn',
'Thai': 'tha_Thai',
'Tigrinya': 'tir_Ethi',
'Tamasheq Latin': 'taq_Latn',
'Tamasheq Tifinagh': 'taq_Tfng',
'Tok Pisin': 'tpi_Latn',
'Tswana': 'tsn_Latn',
'Tsonga': 'tso_Latn',
'Turkmen': 'tuk_Latn',
'Tumbuka': 'tum_Latn',
'Turkish': 'tur_Latn',
'Twi': 'twi_Latn',
'Central Atlas Tamazight': 'tzm_Tfng',
'Uyghur': 'uig_Arab',
'Ukrainian': 'ukr_Cyrl',
'Umbundu': 'umb_Latn',
'Urdu': 'urd_Arab',
'Northern Uzbek': 'uzn_Latn',
'Venetian': 'vec_Latn',
'Vietnamese': 'vie_Latn',
'Waray': 'war_Latn',
'Wolof': 'wol_Latn',
'Xhosa': 'xho_Latn',
'Eastern Yiddish': 'ydd_Hebr',
'Yoruba': 'yor_Latn',
'Yue Chinese': 'yue_Hant',
'Chinese Simplified': 'zho_Hans',
'Chinese Traditional': 'zho_Hant',
'Standard Malay': 'zsm_Latn',
'Zulu': 'zul_Latn',
}
class lowerCamelCase__ ( PipelineTool ):
"""simple docstring"""
__a = """facebook/nllb-200-distilled-600M"""
__a = (
"""This is a tool that translates text from a language to another. It takes three inputs: `text`, which should """
"""be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, """
"""which should be the language for the desired ouput language. Both `src_lang` and `tgt_lang` are written in """
"""plain English, such as 'Romanian', or 'Albanian'. It returns the text translated in `tgt_lang`."""
)
__a = """translator"""
__a = AutoTokenizer
__a = AutoModelForSeqaSeqLM
__a = LANGUAGE_CODES
__a = ["""text""", """text""", """text"""]
__a = ["""text"""]
    def encode( self , text , src_lang , tgt_lang ):
        '''simple docstring'''
        if src_lang not in self.lang_to_code:
            raise ValueError(f'''{src_lang} is not a supported language.''' )
        if tgt_lang not in self.lang_to_code:
            raise ValueError(f'''{tgt_lang} is not a supported language.''' )
        src_lang = self.lang_to_code[src_lang]
        tgt_lang = self.lang_to_code[tgt_lang]
        return self.pre_processor._build_translation_inputs(
            text , return_tensors="pt" , src_lang=src_lang , tgt_lang=tgt_lang )
    def forward( self , inputs ):
        '''simple docstring'''
        return self.model.generate(**inputs )
    def decode( self , outputs ):
        '''simple docstring'''
        return self.post_processor.decode(outputs[0].tolist() , skip_special_tokens=True )
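# Hedged usage sketch (added): a PipelineTool chains encode -> forward -> decode
# when invoked, so the translator above would be used roughly as
#   tool = lowerCamelCase__()
#   tool("Bonjour le monde", src_lang="French", tgt_lang="English")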
| 115 | 1 |
"""simple docstring"""
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
filename = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
    bleu_data = json.load(f)
@require_torch
class UpperCamelCase ( unittest.TestCase ):
    def get_tokenizer( self , mname ):
        return FSMTTokenizer.from_pretrained(mname )
    def get_model( self , mname ):
        model = FSMTForConditionalGeneration.from_pretrained(mname ).to(torch_device )
        if torch_device == "cuda":
            model.half()
        return model
@parameterized.expand(
[
["""en-ru""", 26.0],
["""ru-en""", 22.0],
["""en-de""", 22.0],
["""de-en""", 29.0],
] )
@slow
    def test_bleu_scores( self , pair , min_bleu_score ):
        # note: this test is not testing the best performance since it only evals a small batch
        # but it should be enough to detect a regression in the output quality
        mname = f"""facebook/wmt19-{pair}"""
        tokenizer = self.get_tokenizer(mname )
        model = self.get_model(mname )
        src_sentences = bleu_data[pair]["src"]
        tgt_sentences = bleu_data[pair]["tgt"]
        batch = tokenizer(src_sentences , return_tensors="pt" , truncation=True , padding="longest" ).to(torch_device )
        outputs = model.generate(
            input_ids=batch.input_ids , num_beams=8 , )
        decoded_sentences = tokenizer.batch_decode(
            outputs , skip_special_tokens=True , clean_up_tokenization_spaces=False )
        scores = calculate_bleu(decoded_sentences , tgt_sentences )
        print(scores )
        self.assertGreaterEqual(scores["bleu"] , min_bleu_score )
| 369 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
StableDiffusionXLImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class UpperCamelCase ( PipelineLatentTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = StableDiffusionXLImgaImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components( self ):
        torch.manual_seed(0 )
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , attention_head_dim=(2, 4) , use_linear_projection=True , addition_embed_type="text_time" , addition_time_embed_dim=8 , transformer_layers_per_block=(1, 2) , projection_class_embeddings_input_dim=80 , cross_attention_dim=64 , )
        scheduler = EulerDiscreteScheduler(
            beta_start=0.00085 , beta_end=0.012 , steps_offset=1 , beta_schedule="scaled_linear" , timestep_spacing="leading" , )
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=128 , )
        torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="gelu" , projection_dim=32 , )
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" , local_files_only=False )
        text_encoder_a = CLIPTextModelWithProjection(text_encoder_config )
        tokenizer_a = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" , local_files_only=False )
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "text_encoder_2": text_encoder_a,
            "tokenizer_2": tokenizer_a,
            # "safety_checker": None,
            # "feature_extractor": None,
        }
        return components
    def get_dummy_inputs( self , device , seed=0 ):
        image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
        image = image / 2 + 0.5
        if str(device ).startswith("mps" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 5.0,
            "output_type": "numpy",
            "strength": 0.75,
        }
        return inputs
    def test_stable_diffusion_xl_img2img( self ):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImgaImgPipeline(**components )
        sd_pipe = sd_pipe.to(device )
        sd_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        image = sd_pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4656, 0.4840, 0.4439, 0.6698, 0.5574, 0.4524, 0.5799, 0.5943, 0.5165] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
    def test_attention_slicing_forward_pass( self ):
        super().test_attention_slicing_forward_pass(expected_max_diff=3e-3 )
    def test_inference_batch_single_identical( self ):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def _lowercase ( self : Any ) -> Any:
pass
    def test_stable_diffusion_xl_img2img_negative_prompt_embeds( self ):
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImgaImgPipeline(**components )
        sd_pipe = sd_pipe.to(torch_device )
        sd_pipe.set_progress_bar_config(disable=None )
        # forward without prompt embeds
        inputs = self.get_dummy_inputs(torch_device )
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]
        output = sd_pipe(**inputs )
        image_slice_1 = output.images[0, -3:, -3:, -1]
        # forward with prompt embeds
        inputs = self.get_dummy_inputs(torch_device )
        negative_prompt = 3 * ["this is a negative prompt"]
        prompt = 3 * [inputs.pop("prompt" )]
        (
            prompt_embeds,
            negative_prompt_embeds,
            pooled_prompt_embeds,
            negative_pooled_prompt_embeds,
        ) = sd_pipe.encode_prompt(prompt , negative_prompt=negative_prompt )
        output = sd_pipe(
            **inputs , prompt_embeds=prompt_embeds , negative_prompt_embeds=negative_prompt_embeds , pooled_prompt_embeds=pooled_prompt_embeds , negative_pooled_prompt_embeds=negative_pooled_prompt_embeds , )
        image_slice_2 = output.images[0, -3:, -3:, -1]
        # make sure that it's equal
        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten() ).max() < 1e-4
@slow
@require_torch_gpu
class UpperCamelCase ( unittest.TestCase ):
    def tearDown( self ):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
def _lowercase ( self : List[str] , UpperCAmelCase__ : str , UpperCAmelCase__ : str="cpu" , UpperCAmelCase__ : str=torch.floataa , UpperCAmelCase__ : List[Any]=0 ) -> List[str]:
_a : List[str] = torch.Generator(device=UpperCAmelCase__ ).manual_seed(UpperCAmelCase__ )
_a : Union[str, Any] = np.random.RandomState(UpperCAmelCase__ ).standard_normal((1, 4, 64, 64) )
_a : List[Any] = torch.from_numpy(UpperCAmelCase__ ).to(device=UpperCAmelCase__ , dtype=UpperCAmelCase__ )
_a : Any = {
"""prompt""": """a photograph of an astronaut riding a horse""",
"""latents""": latents,
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
def _lowercase ( self : int ) -> Union[str, Any]:
_a : Union[str, Any] = DiffusionPipeline.from_pretrained("""stabilityai/stable-diffusion-2-base""" )
pipe.to(UpperCAmelCase__ )
pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
_a : List[str] = self.get_inputs(UpperCAmelCase__ )
_a : Tuple = pipe(**UpperCAmelCase__ ).images
_a : List[str] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
_a : int = np.array([0.4_9_4_9_3, 0.4_7_8_9_6, 0.4_0_7_9_8, 0.5_4_2_1_4, 0.5_3_2_1_2, 0.4_8_2_0_2, 0.4_7_6_5_6, 0.4_6_3_2_9, 0.4_8_5_0_6] )
assert np.abs(image_slice - expected_slice ).max() < 7E-3
| 324 | 0 |
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format='''%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s''',
datefmt='''%Y-%m-%d %H:%M:%S''',
level=os.environ.get('''LOGLEVEL''', '''INFO''').upper(),
stream=sys.stdout,
)
lowerCamelCase : Dict = logging.getLogger(__name__)
lowerCamelCase : List[Any] = {'''facebook/bart-base''': BartForConditionalGeneration}
lowerCamelCase : Optional[Any] = {'''facebook/bart-base''': BartTokenizer}
def parse_args():
    parser = argparse.ArgumentParser(description="Export Bart model + Beam Search to ONNX graph." )
    parser.add_argument(
        "--validation_file" , type=str , default=None , help="A csv or a json file containing the validation data." )
    parser.add_argument(
        "--max_length" , type=int , default=5 , help="The maximum total input sequence length after tokenization." , )
    parser.add_argument(
        "--num_beams" , type=int , default=None , help=(
            "Number of beams to use for evaluation. This argument will be "
            "passed to ``model.generate``, which is used during ``evaluate`` and ``predict``."
        ) , )
    parser.add_argument(
        "--model_name_or_path" , type=str , help="Path to pretrained model or model identifier from huggingface.co/models." , required=True , )
    parser.add_argument(
        "--config_name" , type=str , default=None , help="Pretrained config name or path if not the same as model_name" , )
    parser.add_argument(
        "--device" , type=str , default="cpu" , help="Device where the model will be run" , )
    parser.add_argument("--output_file_path" , type=str , default=None , help="Where to store the final ONNX file." )
    args = parser.parse_args()
    return args
def snake_case_ ( lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : int="cpu" ):
__lowercase : Union[str, Any] = model_dict[model_name].from_pretrained(lowerCAmelCase_ ).to(lowerCAmelCase_ )
__lowercase : Dict = tokenizer_dict[model_name].from_pretrained(lowerCAmelCase_ )
if model_name in ["facebook/bart-base"]:
__lowercase : Dict = 0
__lowercase : Optional[Any] = None
__lowercase : Union[str, Any] = 0
return huggingface_model, tokenizer
def export_and_validate_model(model , tokenizer , onnx_file_path , num_beams , max_length ):
    model.eval()
    ort_sess = None
    bart_script_model = torch.jit.script(BARTBeamSearchGenerator(model ) )
    with torch.no_grad():
        ARTICLE_TO_SUMMARIZE = "My friends are cool but they eat too many carbs."
        inputs = tokenizer([ARTICLE_TO_SUMMARIZE] , max_length=1024 , return_tensors="pt" ).to(model.device )
        summary_ids = model.generate(
            inputs["input_ids"] , attention_mask=inputs["attention_mask"] , num_beams=num_beams , max_length=max_length , early_stopping=True , decoder_start_token_id=model.config.decoder_start_token_id , )
        torch.onnx.export(
            bart_script_model , (
                inputs["input_ids"],
                inputs["attention_mask"],
                num_beams,
                max_length,
                model.config.decoder_start_token_id,
            ) , onnx_file_path , opset_version=14 , input_names=["input_ids", "attention_mask", "num_beams", "max_length", "decoder_start_token_id"] , output_names=["output_ids"] , dynamic_axes={
                "input_ids": {0: "batch", 1: "seq"},
                "output_ids": {0: "batch", 1: "seq_out"},
            } , example_outputs=summary_ids , )
        logger.info("Model exported to {}".format(onnx_file_path ) )
        new_onnx_file_path = remove_dup_initializers(os.path.abspath(onnx_file_path ) )
        logger.info("Deduplicated and optimized model written to {}".format(new_onnx_file_path ) )
        ort_sess = onnxruntime.InferenceSession(new_onnx_file_path )
        ort_out = ort_sess.run(
            None , {
                "input_ids": inputs["input_ids"].cpu().numpy(),
                "attention_mask": inputs["attention_mask"].cpu().numpy(),
                "num_beams": np.array(num_beams ),
                "max_length": np.array(max_length ),
                "decoder_start_token_id": np.array(model.config.decoder_start_token_id ),
            } , )
        np.testing.assert_allclose(summary_ids.cpu().numpy() , ort_out[0] , rtol=1e-3 , atol=1e-3 )
        logger.info("Model outputs from torch and ONNX Runtime are similar." )
        logger.info("Success." )
def main():
    args = parse_args()
    max_length = 5
    num_beams = 4
    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO , )
    logger.setLevel(logging.INFO )
    transformers.utils.logging.set_verbosity_error()
    device = torch.device(args.device )
    model, tokenizer = load_model_tokenizer(args.model_name_or_path , device )
    if model.config.decoder_start_token_id is None:
        raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined" )
    model.to(device )
    if args.max_length:
        max_length = args.max_length
    if args.num_beams:
        num_beams = args.num_beams
    if args.output_file_path:
        output_name = args.output_file_path
    else:
        output_name = "BART.onnx"
    logger.info("Exporting model to ONNX" )
    export_and_validate_model(model , tokenizer , output_name , num_beams , max_length )
if __name__ == "__main__":
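    # Example invocation (added sketch; the script filename is a placeholder):
    #   python export_bart_onnx.py --model_name_or_path facebook/bart-base \
    #       --num_beams 4 --max_length 5 --output_file_path bart.onnx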
    main()
| 233 |
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionImgaImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class lowerCAmelCase ( OnnxPipelineTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    hub_checkpoint = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"
    def get_dummy_inputs( self , seed=0 ):
        """simple docstring"""
        image = floats_tensor((1, 3, 128, 128) , rng=random.Random(seed ) )
        generator = np.random.RandomState(seed )
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "strength": 0.75,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs
    def test_pipeline_default_ddim( self ):
        """simple docstring"""
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.69643, 0.58484, 0.50314, 0.58760, 0.55368, 0.59643, 0.51529, 0.41217, 0.49087] )
        assert np.abs(image_slice - expected_slice ).max() < 1e-1
    def test_pipeline_pndm( self ):
        """simple docstring"""
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=True )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.61737, 0.54642, 0.53183, 0.54465, 0.52742, 0.60525, 0.49969, 0.40655, 0.48154] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
    def test_pipeline_lms( self ):
        """simple docstring"""
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=None )
        # warmup pass to apply optimizations
        _ = pipe(**self.get_dummy_inputs() )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52761, 0.59977, 0.49033, 0.49619, 0.54282, 0.50311, 0.47600, 0.40918, 0.45203] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
    def test_pipeline_euler( self ):
        """simple docstring"""
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
    def test_pipeline_euler_ancestral( self ):
        """simple docstring"""
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
    def test_pipeline_dpm_multistep( self ):
        """simple docstring"""
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65331, 0.58277, 0.48204, 0.56059, 0.53665, 0.56235, 0.50969, 0.40009, 0.46552] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@property
    def gpu_provider( self ):
"""simple docstring"""
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
    def gpu_options( self ):
        """simple docstring"""
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
    def test_inference_default_pndm( self ):
        """simple docstring"""
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg" )
        init_image = init_image.resize((768, 512) )
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4" , revision="onnx" , safety_checker=None , feature_extractor=None , provider=self.gpu_provider , sess_options=self.gpu_options , )
        pipe.set_progress_bar_config(disable=None )
        prompt = "A fantasy landscape, trending on artstation"
        generator = np.random.RandomState(0 )
        output = pipe(
            prompt=prompt , image=init_image , strength=0.75 , guidance_scale=7.5 , num_inference_steps=10 , generator=generator , output_type="np" , )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]
        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019] )
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
    def test_inference_k_lms( self ):
        """simple docstring"""
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg" )
        init_image = init_image.resize((768, 512) )
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5" , subfolder="scheduler" , revision="onnx" )
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5" , revision="onnx" , scheduler=lms_scheduler , safety_checker=None , feature_extractor=None , provider=self.gpu_provider , sess_options=self.gpu_options , )
        pipe.set_progress_bar_config(disable=None )
        prompt = "A fantasy landscape, trending on artstation"
        generator = np.random.RandomState(0 )
        output = pipe(
            prompt=prompt , image=init_image , strength=0.75 , guidance_scale=7.5 , num_inference_steps=20 , generator=generator , output_type="np" , )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]
        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431] )
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
| 233 | 1 |
def __lowercase ( number ) -> int:
    """Return the 1-indexed position of the most significant set bit.

    >>> __lowercase(32)
    6
    >>> __lowercase("abc")
    Traceback (most recent call last):
        ...
    TypeError: Input value must be an 'int' type
    """
    if not isinstance(number , int ):
        raise TypeError("Input value must be an 'int' type" )
    position = 0
    while number:
        position += 1
        number >>= 1
    return position
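# Note (added): for non-negative integers this matches Python's built-in
# int.bit_length(), e.g. (32).bit_length() == 6 == __lowercase(32).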
if __name__ == "__main__":
import doctest
doctest.testmod()
| 173 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__a = logging.get_logger(__name__)
__a = {
'''google/pegasus-large''': '''https://huggingface.co/google/pegasus-large/resolve/main/config.json''',
# See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}
class __SCREAMING_SNAKE_CASE ( PretrainedConfig ):
    model_type = 'pegasus'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
    def __init__( self , vocab_size=50265 , max_position_embeddings=1024 , encoder_layers=12 , encoder_ffn_dim=4096 , encoder_attention_heads=16 , decoder_layers=12 , decoder_ffn_dim=4096 , decoder_attention_heads=16 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , use_cache=True , is_encoder_decoder=True , activation_function="gelu" , d_model=1024 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , decoder_start_token_id=0 , scale_embedding=False , pad_token_id=0 , eos_token_id=1 , forced_eos_token_id=1 , **kwargs , ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , decoder_start_token_id=decoder_start_token_id , forced_eos_token_id=forced_eos_token_id , **kwargs , )
    @property
    def num_attention_heads( self ):
        return self.encoder_attention_heads
    @property
    def hidden_size( self ):
        return self.d_model
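# Hedged usage sketch (added; assumes this mirrors transformers' PegasusConfig):
#   config = __SCREAMING_SNAKE_CASE(d_model=512, encoder_layers=6, decoder_layers=6)
#   assert config.hidden_size == 512   # resolved via the property / attribute_map above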
| 173 | 1 |
from scipy.stats import pearsonr
import datasets
a__ : List[Any] = '''\nPearson correlation coefficient and p-value for testing non-correlation.\nThe Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.\nThe p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.\n'''
a__ : Optional[Any] = '''\nArgs:\n predictions (`list` of `int`): Predicted class labels, as returned by a model.\n references (`list` of `int`): Ground truth labels.\n return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.\n\nReturns:\n pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.\n p-value (`float`): P-value, which roughly indicates the probability of an The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.\n\nExamples:\n\n Example 1-A simple example using only predictions and references.\n >>> pearsonr_metric = datasets.load_metric(\"pearsonr\")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])\n >>> print(round(results[\'pearsonr\'], 2))\n -0.74\n\n Example 2-The same as Example 1, but that also returns the `p-value`.\n >>> pearsonr_metric = datasets.load_metric(\"pearsonr\")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)\n >>> print(sorted(list(results.keys())))\n [\'p-value\', \'pearsonr\']\n >>> print(round(results[\'pearsonr\'], 2))\n -0.74\n >>> print(round(results[\'p-value\'], 2))\n 0.15\n'''
a__ : str = '''\n@article{2020SciPy-NMeth,\nauthor = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, Ilhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Antonio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\ntitle = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\njournal = {Nature Methods},\nyear = {2020},\nvolume = {17},\npages = {261--272},\nadsurl = {https://rdcu.be/b08Wh},\ndoi = {10.1038/s41592-019-0686-2},\n}\n'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a_ ( datasets.Metric ):
"""simple docstring"""
    def _info( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''float''' ),
'''references''': datasets.Value('''float''' ),
} ) , reference_urls=['''https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html'''] , )
    def _compute( self , predictions , references , return_pvalue=False ):
        if return_pvalue:
            results = pearsonr(references , predictions )
            return {"pearsonr": results[0], "p-value": results[1]}
        else:
            return {"pearsonr": float(pearsonr(references , predictions )[0] )}
| 313 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__a = {
"configuration_altclip": [
"ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"AltCLIPConfig",
"AltCLIPTextConfig",
"AltCLIPVisionConfig",
],
"processing_altclip": ["AltCLIPProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
"ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"AltCLIPPreTrainedModel",
"AltCLIPModel",
"AltCLIPTextModel",
"AltCLIPVisionModel",
]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 66 | 0 |
"""simple docstring"""
import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
POOLING_BREAKDOWN = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}
class ImageEncoder( nn.Module ):
    """simple docstring"""
    def __init__( self , args ):
        """simple docstring"""
        super().__init__()
        model = torchvision.models.resnet152(pretrained=True )
        modules = list(model.children() )[:-2]
        self.model = nn.Sequential(*modules )
        self.pool = nn.AdaptiveAvgPool2d(POOLING_BREAKDOWN[args.num_image_embeds] )
    def forward( self , input_modal ):
        """simple docstring"""
        out = self.pool(self.model(input_modal ) )
        out = torch.flatten(out , start_dim=2 )
        out = out.transpose(1 , 2 ).contiguous()
        return out  # BxNx2048
class JsonlDataset( Dataset ):
    """simple docstring"""
    def __init__( self , data_path , tokenizer , transforms , labels , max_seq_length ):
        """simple docstring"""
        self.data = [json.loads(line ) for line in open(data_path )]
        self.data_dir = os.path.dirname(data_path )
        self.tokenizer = tokenizer
        self.labels = labels
        self.n_classes = len(labels )
        self.max_seq_length = max_seq_length
        self.transforms = transforms
def __len__( self ):
"""simple docstring"""
return len(self.data )
    def __getitem__( self , index ):
        """simple docstring"""
        sentence = torch.LongTensor(self.tokenizer.encode(self.data[index]["text"] , add_special_tokens=True ) )
        start_token , sentence , end_token = sentence[0], sentence[1:-1], sentence[-1]
        sentence = sentence[: self.max_seq_length]
        label = torch.zeros(self.n_classes )
        label[[self.labels.index(tgt ) for tgt in self.data[index]["label"]]] = 1
        image = Image.open(os.path.join(self.data_dir , self.data[index]["img"] ) ).convert("RGB" )
        image = self.transforms(image )
return {
"image_start_token": start_token,
"image_end_token": end_token,
"sentence": sentence,
"image": image,
"label": label,
}
    def get_label_frequencies( self ):
        """simple docstring"""
        label_freqs = Counter()
for row in self.data:
label_freqs.update(row["label"] )
return label_freqs
def collate_fn( batch ):
    '''simple docstring'''
    lens = [len(row["sentence"] ) for row in batch]
    bsz , max_seq_len = len(batch ), max(lens )
    mask_tensor = torch.zeros(bsz , max_seq_len , dtype=torch.long )
    text_tensor = torch.zeros(bsz , max_seq_len , dtype=torch.long )
    for i_batch, (input_row, length) in enumerate(zip(batch , lens ) ):
        text_tensor[i_batch, :length] = input_row["sentence"]
        mask_tensor[i_batch, :length] = 1
    img_tensor = torch.stack([row["image"] for row in batch] )
    tgt_tensor = torch.stack([row["label"] for row in batch] )
    img_start_token = torch.stack([row["image_start_token"] for row in batch] )
    img_end_token = torch.stack([row["image_end_token"] for row in batch] )
    return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor
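# Illustrative wiring (a sketch; assumes a tokenizer and a .jsonl data file are
# available, and uses the helpers defined below):
#     dataset = JsonlDataset("train.jsonl", tokenizer, get_image_transforms(), get_mmimdb_labels(), 512)
#     loader = torch.utils.data.DataLoader(dataset, batch_size=8, collate_fn=collate_fn)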
def get_mmimdb_labels( ):
'''simple docstring'''
return [
"Crime",
"Drama",
"Thriller",
"Action",
"Comedy",
"Romance",
"Documentary",
"Short",
"Mystery",
"History",
"Family",
"Adventure",
"Fantasy",
"Sci-Fi",
"Western",
"Horror",
"Sport",
"War",
"Music",
"Musical",
"Animation",
"Biography",
"Film-Noir",
]
def get_image_transforms( ):
'''simple docstring'''
return transforms.Compose(
[
transforms.Resize(2_5_6 ),
transforms.CenterCrop(2_2_4 ),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.46_777_044, 0.44_531_429, 0.40_661_017] , std=[0.12_221_994, 0.12_145_835, 0.14_380_469] , ),
] )
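# Note: the mean/std values above are the ImageNet statistics commonly paired
# with torchvision's pretrained ResNet backbones.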
| 133 |
"""simple docstring"""
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def a__ ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Any ):
'''simple docstring'''
assert isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def a__ ( SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
lowerCAmelCase : List[Any] = tmp_path / "cache"
lowerCAmelCase : Optional[int] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
lowerCAmelCase : str = ParquetDatasetReader(SCREAMING_SNAKE_CASE , cache_dir=SCREAMING_SNAKE_CASE , keep_in_memory=SCREAMING_SNAKE_CASE ).read()
_check_parquet_dataset(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
@pytest.mark.parametrize(
"features" , [
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
] , )
def a__ ( SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Tuple ):
'''simple docstring'''
lowerCAmelCase : Optional[int] = tmp_path / "cache"
lowerCAmelCase : Optional[Any] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
lowerCAmelCase : Tuple = features.copy() if features else default_expected_features
lowerCAmelCase : List[str] = (
Features({feature: Value(SCREAMING_SNAKE_CASE ) for feature, dtype in features.items()} ) if features is not None else None
)
lowerCAmelCase : List[str] = ParquetDatasetReader(SCREAMING_SNAKE_CASE , features=SCREAMING_SNAKE_CASE , cache_dir=SCREAMING_SNAKE_CASE ).read()
_check_parquet_dataset(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def a__ ( SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : Optional[int] ):
'''simple docstring'''
lowerCAmelCase : List[Any] = tmp_path / "cache"
lowerCAmelCase : int = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
lowerCAmelCase : Tuple = ParquetDatasetReader(SCREAMING_SNAKE_CASE , cache_dir=SCREAMING_SNAKE_CASE , split=SCREAMING_SNAKE_CASE ).read()
_check_parquet_dataset(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("path_type" , [str, list] )
def test_dataset_from_parquet_path_type( path_type , parquet_path , tmp_path ):
    '''simple docstring'''
    if issubclass(path_type , str ):
        path = parquet_path
    elif issubclass(path_type , list ):
        path = [parquet_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path , cache_dir=cache_dir ).read()
    _check_parquet_dataset(dataset , expected_features )
def a__ ( SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Tuple=("train",) ):
'''simple docstring'''
assert isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
for split in splits:
lowerCAmelCase : List[Any] = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def a__ ( SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Tuple ):
'''simple docstring'''
lowerCAmelCase : Tuple = tmp_path / "cache"
lowerCAmelCase : List[Any] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
lowerCAmelCase : Optional[Any] = ParquetDatasetReader(
{"train": parquet_path} , cache_dir=SCREAMING_SNAKE_CASE , keep_in_memory=SCREAMING_SNAKE_CASE ).read()
_check_parquet_datasetdict(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
@pytest.mark.parametrize(
"features" , [
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
] , )
def a__ ( SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Optional[int] ):
'''simple docstring'''
lowerCAmelCase : Any = tmp_path / "cache"
lowerCAmelCase : Union[str, Any] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
lowerCAmelCase : List[Any] = features.copy() if features else default_expected_features
lowerCAmelCase : List[Any] = (
Features({feature: Value(SCREAMING_SNAKE_CASE ) for feature, dtype in features.items()} ) if features is not None else None
)
lowerCAmelCase : Optional[int] = ParquetDatasetReader({"train": parquet_path} , features=SCREAMING_SNAKE_CASE , cache_dir=SCREAMING_SNAKE_CASE ).read()
_check_parquet_datasetdict(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def a__ ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : List[str] ):
'''simple docstring'''
if split:
lowerCAmelCase : List[str] = {split: parquet_path}
else:
lowerCAmelCase : List[str] = "train"
lowerCAmelCase : str = {"train": parquet_path, "test": parquet_path}
lowerCAmelCase : Optional[int] = tmp_path / "cache"
lowerCAmelCase : List[Any] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
lowerCAmelCase : List[str] = ParquetDatasetReader(SCREAMING_SNAKE_CASE , cache_dir=SCREAMING_SNAKE_CASE ).read()
_check_parquet_datasetdict(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def a__ ( SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Any ):
'''simple docstring'''
lowerCAmelCase : Optional[int] = ParquetDatasetWriter(SCREAMING_SNAKE_CASE , tmp_path / "foo.parquet" )
assert writer.write() > 0
lowerCAmelCase : List[str] = pq.ParquetFile(tmp_path / "foo.parquet" )
lowerCAmelCase : Union[str, Any] = pf.read()
assert dataset.data.table == output_table
def a__ ( SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : List[str] ):
'''simple docstring'''
lowerCAmelCase : str = str(shared_datadir / "test_image_rgb.jpg" )
lowerCAmelCase : List[str] = {"image": [image_path]}
lowerCAmelCase : str = Features({"image": Image()} )
lowerCAmelCase : Optional[Any] = Dataset.from_dict(SCREAMING_SNAKE_CASE , features=SCREAMING_SNAKE_CASE )
lowerCAmelCase : List[Any] = ParquetDatasetWriter(SCREAMING_SNAKE_CASE , tmp_path / "foo.parquet" )
assert writer.write() > 0
lowerCAmelCase : Any = Dataset.from_parquet(str(tmp_path / "foo.parquet" ) )
assert dataset.features == reloaded_dataset.features
lowerCAmelCase : Optional[int] = ParquetDatasetReader(str(tmp_path / "foo.parquet" ) , streaming=SCREAMING_SNAKE_CASE ).read()
assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
"feature, expected" , [
(Features({"foo": Value("int32" )} ), None),
(Features({"image": Image(), "foo": Value("int32" )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
(Features({"nested": Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
] , )
def a__ ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : List[str] ):
'''simple docstring'''
assert get_writer_batch_size(SCREAMING_SNAKE_CASE ) == expected
| 133 | 1 |
'''simple docstring'''
import os
SYMBOLS = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1_000}
def parse_roman_numerals(numerals: str ) -> int:
    total_value = 0
    index = 0
    while index < len(numerals ) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
        if current_value < next_value:
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
total_value += SYMBOLS[numerals[index]]
return total_value
def generate_roman_numerals(num: int ) -> str:
    numerals = ''
    m_count = num // 1000
numerals += m_count * "M"
num %= 1000
    c_count = num // 100
if c_count == 9:
numerals += "CM"
c_count -= 9
elif c_count == 4:
numerals += "CD"
c_count -= 4
if c_count >= 5:
numerals += "D"
c_count -= 5
numerals += c_count * "C"
num %= 100
    x_count = num // 10
if x_count == 9:
numerals += "XC"
x_count -= 9
elif x_count == 4:
numerals += "XL"
x_count -= 4
if x_count >= 5:
numerals += "L"
x_count -= 5
numerals += x_count * "X"
num %= 10
if num == 9:
numerals += "IX"
num -= 9
elif num == 4:
numerals += "IV"
num -= 4
if num >= 5:
numerals += "V"
num -= 5
numerals += num * "I"
return numerals
def solution(roman_numerals_filename: str = "/p089_roman.txt" ) -> int:
    savings = 0
    with open(os.path.dirname(__file__ ) + roman_numerals_filename ) as filea:
        lines = filea.readlines()
    for line in lines:
        original = line.strip()
        num = parse_roman_numerals(original )
        shorter = generate_roman_numerals(num )
        savings += len(original ) - len(shorter )
return savings
if __name__ == "__main__":
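    # Illustrative round-trip check: generating the minimal numeral for a value
    # and parsing it back recovers that value.
    assert parse_roman_numerals(generate_roman_numerals(1994)) == 1994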
print(F'''{solution() = }''')
| 4 |
"""simple docstring"""
from __future__ import annotations
def slowsort(sequence: list , start: int | None = None , end: int | None = None ) -> None:
    if start is None:
        start = 0
    if end is None:
        end = len(sequence ) - 1
    if start >= end:
        return
    mid = (start + end) // 2
    slowsort(sequence , start , mid )
    slowsort(sequence , mid + 1 , end )
    if sequence[end] < sequence[mid]:
        sequence[end] , sequence[mid] = sequence[mid] , sequence[end]
    slowsort(sequence , start , end - 1 )
if __name__ == "__main__":
from doctest import testmod
testmod()
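    # Minimal usage sketch: slowsort sorts the list in place.
    example = [5, 2, 9, 1]
    slowsort(example)
    assert example == [1, 2, 5, 9]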
| 171 | 0 |
"""simple docstring"""
def merge_sort(collection: list ) -> list:
    '''simple docstring'''
    start, end = [], []
    while len(collection ) > 1:
        min_one, max_one = min(collection ), max(collection )
        start.append(min_one )
        end.append(max_one )
        collection.remove(min_one )
        collection.remove(max_one )
    end.reverse()
    return start + collection + end
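# Illustrative behaviour: merge_sort([5, 3, 1, 4, 2]) returns [1, 2, 3, 4, 5].
# Note that the input list is consumed down to at most one element, since
# values are removed as they are placed.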
if __name__ == "__main__":
lowerCamelCase : List[str] = input("Enter numbers separated by a comma:\n").strip()
lowerCamelCase : Dict = [int(item) for item in user_input.split(",")]
print(*merge_sort(unsorted), sep=",")
| 359 |
import math
def fx(x: float , a: float ) -> float:
    '''simple docstring'''
    return math.pow(x , 2 ) - a
def fx_derivative(x: float ) -> float:
    '''simple docstring'''
    return 2 * x
def get_initial_point(a: float ) -> float:
    '''simple docstring'''
    start = 2.0
    while start <= a:
        start = math.pow(start , 2 )
    return start
def square_root_iterative(a: float , max_iter: int = 9999 , tolerance: float = 0.00000000000001 ) -> float:
    '''simple docstring'''
    if a < 0:
        raise ValueError('math domain error' )
    value = get_initial_point(a )
    for _ in range(max_iter ):
        prev_value = value
        value = value - fx(value , a ) / fx_derivative(value )
        if abs(prev_value - value ) < tolerance:
            return value
    return value
if __name__ == "__main__":
from doctest import testmod
testmod()
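    # Illustrative check: Newton's method on f(x) = x^2 - a converges to sqrt(a).
    assert abs(square_root_iterative(9.0) - 3.0) < 1e-6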
| 208 | 0 |
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
lowerCAmelCase__ = logging.get_logger(__name__)
class BeitFeatureExtractor( BeitImageProcessor):
    '''simple docstring'''
    def __init__( self , *args , **kwargs) -> None:
        warnings.warn(
            "The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use BeitImageProcessor instead." , FutureWarning , )
        super().__init__(*args , **kwargs)
| 11 |
'''simple docstring'''
import os
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home
default_cache_path = HUGGINGFACE_HUB_CACHE
CONFIG_NAME = 'config.json'
WEIGHTS_NAME = 'diffusion_pytorch_model.bin'
FLAX_WEIGHTS_NAME = 'diffusion_flax_model.msgpack'
ONNX_WEIGHTS_NAME = 'model.onnx'
SAFETENSORS_WEIGHTS_NAME = 'diffusion_pytorch_model.safetensors'
ONNX_EXTERNAL_WEIGHTS_NAME = 'weights.pb'
HUGGINGFACE_CO_RESOLVE_ENDPOINT = 'https://huggingface.co'
DIFFUSERS_CACHE = default_cache_path
DIFFUSERS_DYNAMIC_MODULE_NAME = 'diffusers_modules'
HF_MODULES_CACHE = os.getenv('HF_MODULES_CACHE', os.path.join(hf_cache_home, 'modules'))
DEPRECATED_REVISION_ARGS = ['fp16', 'non-ema']
TEXT_ENCODER_ATTN_MODULE = '.self_attn'
| 324 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaInpaintPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class UpperCAmelCase__ ( A__ , unittest.TestCase ):
"""simple docstring"""
a = KandinskyVaaInpaintPipeline
a = ["image_embeds", "negative_image_embeds", "image", "mask_image"]
a = [
"image_embeds",
"negative_image_embeds",
"image",
"mask_image",
]
a = [
"generator",
"height",
"width",
"latents",
"guidance_scale",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
a = False
@property
def lowercase_ ( self : Any ) -> List[Any]:
return 32
@property
def lowercase_ ( self : int ) -> Union[str, Any]:
return 32
@property
def lowercase_ ( self : Dict ) -> Union[str, Any]:
return self.time_input_dim
@property
def lowercase_ ( self : Optional[Any] ) -> int:
return self.time_input_dim * 4
@property
def lowercase_ ( self : int ) -> List[Any]:
return 100
@property
def lowercase_ ( self : int ) -> Any:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = {
'''in_channels''': 9,
# Out channels is double in channels because predicts mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''image''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
SCREAMING_SNAKE_CASE__ = UNetaDConditionModel(**__lowerCamelCase )
return model
@property
def lowercase_ ( self : Any ) -> Optional[Any]:
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def lowercase_ ( self : List[str] ) -> int:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = VQModel(**self.dummy_movq_kwargs )
return model
def lowercase_ ( self : int ) -> Dict:
SCREAMING_SNAKE_CASE__ = self.dummy_unet
SCREAMING_SNAKE_CASE__ = self.dummy_movq
SCREAMING_SNAKE_CASE__ = DDIMScheduler(
num_train_timesteps=1000 , beta_schedule='''linear''' , beta_start=0.00085 , beta_end=0.012 , clip_sample=__lowerCamelCase , set_alpha_to_one=__lowerCamelCase , steps_offset=1 , prediction_type='''epsilon''' , thresholding=__lowerCamelCase , )
SCREAMING_SNAKE_CASE__ = {
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def lowercase_ ( self : int , __lowerCamelCase : List[Any] , __lowerCamelCase : Tuple=0 ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(__lowerCamelCase ) ).to(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
__lowerCamelCase )
# create init_image
SCREAMING_SNAKE_CASE__ = floats_tensor((1, 3, 64, 64) , rng=random.Random(__lowerCamelCase ) ).to(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        SCREAMING_SNAKE_CASE__ = Image.fromarray(np.uint8(__lowerCamelCase ) ).convert('''RGB''' ).resize((256, 256) )
# create mask
        SCREAMING_SNAKE_CASE__ = np.ones((64, 64) , dtype=np.float32 )
SCREAMING_SNAKE_CASE__ = 0
if str(__lowerCamelCase ).startswith('''mps''' ):
SCREAMING_SNAKE_CASE__ = torch.manual_seed(__lowerCamelCase )
else:
SCREAMING_SNAKE_CASE__ = torch.Generator(device=__lowerCamelCase ).manual_seed(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = {
'''image''': init_image,
'''mask_image''': mask,
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''generator''': generator,
'''height''': 64,
'''width''': 64,
'''num_inference_steps''': 2,
'''guidance_scale''': 4.0,
'''output_type''': '''np''',
}
return inputs
def lowercase_ ( self : Any ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ = '''cpu'''
SCREAMING_SNAKE_CASE__ = self.get_dummy_components()
SCREAMING_SNAKE_CASE__ = self.pipeline_class(**__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = pipe(**self.get_dummy_inputs(__lowerCamelCase ) )
SCREAMING_SNAKE_CASE__ = output.images
SCREAMING_SNAKE_CASE__ = pipe(
**self.get_dummy_inputs(__lowerCamelCase ) , return_dict=__lowerCamelCase , )[0]
SCREAMING_SNAKE_CASE__ = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE__ = image_from_tuple[0, -3:, -3:, -1]
print(f'''image.shape {image.shape}''' )
assert image.shape == (1, 64, 64, 3)
SCREAMING_SNAKE_CASE__ = np.array(
[0.50775903, 0.49527195, 0.48824543, 0.50192237, 0.48644906, 0.49373814, 0.4780598, 0.47234827, 0.48327848] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
def lowercase_ ( self : Tuple ) -> Dict:
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def lowercase_ ( self : int ) -> str:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase_ ( self : Union[str, Any] ) -> Dict:
SCREAMING_SNAKE_CASE__ = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy''' )
SCREAMING_SNAKE_CASE__ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' )
        SCREAMING_SNAKE_CASE__ = np.ones((768, 768) , dtype=np.float32 )
SCREAMING_SNAKE_CASE__ = 0
SCREAMING_SNAKE_CASE__ = '''a hat'''
SCREAMING_SNAKE_CASE__ = KandinskyVaaPriorPipeline.from_pretrained(
            '''kandinsky-community/kandinsky-2-2-prior''' , torch_dtype=torch.float16 )
pipe_prior.to(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = KandinskyVaaInpaintPipeline.from_pretrained(
            '''kandinsky-community/kandinsky-2-2-decoder-inpaint''' , torch_dtype=torch.float16 )
SCREAMING_SNAKE_CASE__ = pipeline.to(__lowerCamelCase )
pipeline.set_progress_bar_config(disable=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = torch.Generator(device='''cpu''' ).manual_seed(0 )
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = pipe_prior(
__lowerCamelCase , generator=__lowerCamelCase , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple()
SCREAMING_SNAKE_CASE__ = pipeline(
image=__lowerCamelCase , mask_image=__lowerCamelCase , image_embeds=__lowerCamelCase , negative_image_embeds=__lowerCamelCase , generator=__lowerCamelCase , num_inference_steps=100 , height=768 , width=768 , output_type='''np''' , )
SCREAMING_SNAKE_CASE__ = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(__lowerCamelCase , __lowerCamelCase )
| 218 |
from __future__ import annotations
def peak(lst: list[int] ) -> int:
    '''simple docstring'''
    m = len(lst ) // 2
    # choose the middle 3 elements
    three = lst[m - 1 : m + 2]
# if middle element is peak
if three[1] > three[0] and three[1] > three[2]:
return three[1]
# if increasing, recurse on right
elif three[0] < three[2]:
if len(lst[:m] ) == 2:
m -= 1
return peak(lst[m:] )
# decreasing
else:
if len(lst[:m] ) == 2:
m += 1
return peak(lst[:m] )
if __name__ == "__main__":
import doctest
doctest.testmod()
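    # Illustrative check on a bitonic list (strictly rises, then falls).
    assert peak([1, 2, 3, 4, 5, 4, 3, 2, 1]) == 5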
| 218 | 1 |
"""simple docstring"""
import numpy
class a :
    def __init__( self , input_array : numpy.ndarray , output_array : numpy.ndarray ) -> None:
        '''simple docstring'''
        self.input_array = input_array
        # Random initial weights are assigned where first argument is the
        # number of nodes in previous layer and second argument is the
        # number of nodes in the next layer.
        # Random initial weights are assigned.
        # self.input_array.shape[1] is used to represent number of nodes in input layer.
        # First hidden layer consists of 4 nodes.
        self.input_layer_and_first_hidden_layer_weights = numpy.random.rand(
            self.input_array.shape[1] , 4 )
        # Random initial values for the first hidden layer.
        # First hidden layer has 4 nodes.
        # Second hidden layer has 3 nodes.
        self.first_hidden_layer_and_second_hidden_layer_weights = numpy.random.rand(
            4 , 3 )
        # Random initial values for the second hidden layer.
        # Second hidden layer has 3 nodes.
        # Output layer has 1 node.
        self.second_hidden_layer_and_output_layer_weights = numpy.random.rand(3 , 1 )
        # Real output values provided.
        self.output_array = output_array
        # Predicted output values by the neural network.
        # Predicted_output array initially consists of zeroes.
        self.predicted_output = numpy.zeros(output_array.shape )
    def feedforward( self ) -> numpy.ndarray:
        '''simple docstring'''
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.input_array , self.input_layer_and_first_hidden_layer_weights ) )
        # layer_between_first_hidden_layer_and_second_hidden_layer is the layer
        # connecting the first hidden set of nodes with the second hidden set of nodes.
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
        # layer_between_second_hidden_layer_and_output is the layer connecting
        # second hidden layer with the output node.
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
        return self.layer_between_second_hidden_layer_and_output
    def back_propagation( self ) -> None:
'''simple docstring'''
        updated_second_hidden_layer_and_output_layer_weights = numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer.T , 2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , )
        updated_first_hidden_layer_and_second_hidden_layer_weights = numpy.dot(
self.layer_between_input_and_first_hidden_layer.T , numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , )
        updated_input_layer_and_first_hidden_layer_weights = numpy.dot(
self.input_array.T , numpy.dot(
numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , self.first_hidden_layer_and_second_hidden_layer_weights.T , )
* sigmoid_derivative(self.layer_between_input_and_first_hidden_layer ) , )
self.input_layer_and_first_hidden_layer_weights += (
updated_input_layer_and_first_hidden_layer_weights
)
self.first_hidden_layer_and_second_hidden_layer_weights += (
updated_first_hidden_layer_and_second_hidden_layer_weights
)
self.second_hidden_layer_and_output_layer_weights += (
updated_second_hidden_layer_and_output_layer_weights
)
    def train( self , output : numpy.ndarray , iterations : int , give_loss : bool ) -> None:
        '''simple docstring'''
        for iteration in range(1 , iterations + 1 ):
            self.output = self.feedforward()
            self.back_propagation()
            if give_loss:
                loss = numpy.mean(numpy.square(output - self.feedforward() ) )
                print(f'''Iteration {iteration} Loss: {loss}''' )
    def predict( self , input_arr : numpy.ndarray ) -> int:
        '''simple docstring'''
        self.array = input_arr
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.array , self.input_layer_and_first_hidden_layer_weights ) )
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
        return int(self.layer_between_second_hidden_layer_and_output > 0.6 )
def sigmoid( value ):
    return 1 / (1 + numpy.exp(-value ))
def sigmoid_derivative( value ):
    return (value) * (1 - (value))
def example( ):
    test_input = numpy.array(
(
[0, 0, 0],
[0, 0, 1],
[0, 1, 0],
[0, 1, 1],
[1, 0, 0],
[1, 0, 1],
[1, 1, 0],
[1, 1, 1],
        ) , dtype=numpy.float64 , )
# True output values for the given input values.
    output = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]) , dtype=numpy.float64 )
    # Calling neural network class.
    neural_network = a(
        input_array=test_input , output_array=output )
    # Calling training function.
    # Set give_loss to True if you want to see loss in every iteration.
    neural_network.train(output=output , iterations=10 , give_loss=False )
    return neural_network.predict(numpy.array(([1, 1, 1]) , dtype=numpy.float64 ) )
if __name__ == "__main__":
example()
| 173 |
"""simple docstring"""
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
_UpperCAmelCase = get_tests_dir("""fixtures/test_sentencepiece.model""")
_UpperCAmelCase = {"""target_lang""": """fi""", """source_lang""": """en"""}
_UpperCAmelCase = """>>zh<<"""
_UpperCAmelCase = """Helsinki-NLP/"""
if is_torch_available():
_UpperCAmelCase = """pt"""
elif is_tf_available():
_UpperCAmelCase = """tf"""
else:
_UpperCAmelCase = """jax"""
@require_sentencepiece
class a ( UpperCAmelCase__ , unittest.TestCase ):
UpperCamelCase : Any = MarianTokenizer
UpperCamelCase : List[Any] = False
UpperCamelCase : Optional[Any] = True
def lowerCamelCase__ ( self : Optional[Any] ) -> int:
'''simple docstring'''
super().setUp()
SCREAMING_SNAKE_CASE_: str =["""</s>""", """<unk>""", """▁This""", """▁is""", """▁a""", """▁t""", """est""", """\u0120""", """<pad>"""]
SCREAMING_SNAKE_CASE_: List[Any] =dict(zip(lowerCAmelCase , range(len(lowerCAmelCase ) ) ) )
SCREAMING_SNAKE_CASE_: Optional[int] =Path(self.tmpdirname )
save_json(lowerCAmelCase , save_dir / VOCAB_FILES_NAMES["""vocab"""] )
save_json(lowerCAmelCase , save_dir / VOCAB_FILES_NAMES["""tokenizer_config_file"""] )
if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
copyfile(lowerCAmelCase , save_dir / VOCAB_FILES_NAMES["""source_spm"""] )
copyfile(lowerCAmelCase , save_dir / VOCAB_FILES_NAMES["""target_spm"""] )
SCREAMING_SNAKE_CASE_: str =MarianTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCamelCase__ ( self : str , **lowerCAmelCase : Any ) -> MarianTokenizer:
'''simple docstring'''
return MarianTokenizer.from_pretrained(self.tmpdirname , **lowerCAmelCase )
def lowerCamelCase__ ( self : Union[str, Any] , lowerCAmelCase : List[str] ) -> int:
'''simple docstring'''
return (
"This is a test",
"This is a test",
)
def lowerCamelCase__ ( self : Any ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[str] ="""</s>"""
SCREAMING_SNAKE_CASE_: List[str] =0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase ) , lowerCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase ) , lowerCAmelCase )
def lowerCamelCase__ ( self : str ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Dict =list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """</s>""" )
self.assertEqual(vocab_keys[1] , """<unk>""" )
self.assertEqual(vocab_keys[-1] , """<pad>""" )
self.assertEqual(len(lowerCAmelCase ) , 9 )
def lowerCamelCase__ ( self : Dict ) -> Tuple:
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 9 )
def lowerCamelCase__ ( self : Union[str, Any] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Optional[Any] =MarianTokenizer.from_pretrained(f'''{ORG_NAME}opus-mt-en-de''' )
SCREAMING_SNAKE_CASE_: List[Any] =en_de_tokenizer(["""I am a small frog"""] , return_tensors=lowerCAmelCase )
self.assertIsInstance(lowerCAmelCase , lowerCAmelCase )
SCREAMING_SNAKE_CASE_: int =[38, 121, 14, 697, 3_8848, 0]
self.assertListEqual(lowerCAmelCase , batch.input_ids[0] )
SCREAMING_SNAKE_CASE_: Optional[int] =tempfile.mkdtemp()
en_de_tokenizer.save_pretrained(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: List[str] =[x.name for x in Path(lowerCAmelCase ).glob("""*""" )]
self.assertIn("""source.spm""" , lowerCAmelCase )
MarianTokenizer.from_pretrained(lowerCAmelCase )
def lowerCamelCase__ ( self : int ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Tuple =self.get_tokenizer()
SCREAMING_SNAKE_CASE_: str =tok(
["""I am a small frog""" * 1000, """I am a small frog"""] , padding=lowerCAmelCase , truncation=lowerCAmelCase , return_tensors=lowerCAmelCase )
self.assertIsInstance(lowerCAmelCase , lowerCAmelCase )
self.assertEqual(batch.input_ids.shape , (2, 512) )
def lowerCamelCase__ ( self : Optional[int] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: int =self.get_tokenizer()
SCREAMING_SNAKE_CASE_: int =tok(["""I am a tiny frog""", """I am a small frog"""] , padding=lowerCAmelCase , return_tensors=lowerCAmelCase )
self.assertIsInstance(lowerCAmelCase , lowerCAmelCase )
self.assertEqual(batch_smaller.input_ids.shape , (2, 10) )
@slow
def lowerCamelCase__ ( self : str ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Tuple ={"""input_ids""": [[4_3495, 462, 20, 4_2164, 1369, 52, 464, 132, 1703, 492, 13, 7491, 3_8999, 6, 8, 464, 132, 1703, 492, 13, 4669, 3_7867, 13, 7525, 27, 1593, 988, 13, 3_3972, 7029, 6, 20, 8251, 383, 2, 270, 5866, 3788, 2, 2353, 8251, 1_2338, 2, 1_3958, 387, 2, 3629, 6953, 188, 2900, 2, 1_3958, 8011, 1_1501, 23, 8460, 4073, 3_4009, 20, 435, 1_1439, 27, 8, 8460, 4073, 6004, 20, 9988, 375, 27, 33, 266, 1945, 1076, 1350, 3_7867, 3288, 5, 577, 1076, 4374, 8, 5082, 5, 2_6453, 257, 556, 403, 2, 242, 132, 383, 316, 492, 8, 1_0767, 6, 316, 304, 4239, 3, 0], [148, 1_5722, 19, 1839, 12, 1350, 13, 2_2327, 5082, 5418, 4_7567, 3_5938, 59, 318, 1_9552, 108, 2183, 54, 1_4976, 4835, 32, 547, 1114, 8, 315, 2417, 5, 92, 1_9088, 3, 0, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100], [36, 6395, 1_2570, 3_9147, 1_1597, 6, 266, 4, 4_5405, 7296, 3, 0, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCAmelCase , model_name="""Helsinki-NLP/opus-mt-en-de""" , revision="""1a8c2263da11e68e50938f97e10cd57820bd504c""" , decode_kwargs={"""use_source_tokenizer""": True} , )
def lowerCamelCase__ ( self : int ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[str] =MarianTokenizer.from_pretrained("""hf-internal-testing/test-marian-two-vocabs""" )
SCREAMING_SNAKE_CASE_: Optional[int] ="""Tämä on testi"""
SCREAMING_SNAKE_CASE_: Union[str, Any] ="""This is a test"""
SCREAMING_SNAKE_CASE_: List[Any] =[76, 7, 2047, 2]
SCREAMING_SNAKE_CASE_: Any =[69, 12, 11, 940, 2]
SCREAMING_SNAKE_CASE_: Optional[int] =tokenizer(lowerCAmelCase ).input_ids
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
SCREAMING_SNAKE_CASE_: str =tokenizer(text_target=lowerCAmelCase ).input_ids
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
SCREAMING_SNAKE_CASE_: str =tokenizer.decode(lowerCAmelCase , skip_special_tokens=lowerCAmelCase )
self.assertEqual(lowerCAmelCase , lowerCAmelCase )
| 173 | 1 |
"""simple docstring"""
from math import atan, cos, radians, sin, tan
from .haversine_distance import haversine_distance
AXIS_A = 6378137.0
AXIS_B = 6356752.314245
EQUATORIAL_RADIUS = 6378137
def lamberts_ellipsoidal_distance(lat1: float , lon1: float , lat2: float , lon2: float ) -> float:
    '''simple docstring'''
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    # Parametric latitudes
    # https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
    b_lat1 = atan((1 - flattening) * tan(radians(lat1 ) ) )
    b_lat2 = atan((1 - flattening) * tan(radians(lat2 ) ) )
    # Compute central angle between two points
    # using haversine theta. sigma = haversine_distance / equatorial radius
    sigma = haversine_distance(lat1 , lon1 , lat2 , lon2 ) / EQUATORIAL_RADIUS
    # Intermediate P and Q values
    p_value = (b_lat1 + b_lat2) / 2
    q_value = (b_lat2 - b_lat1) / 2
    # Intermediate X value
    # X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2)
    x_numerator = (sin(p_value ) ** 2) * (cos(q_value ) ** 2)
    x_denominator = cos(sigma / 2 ) ** 2
    x_value = (sigma - sin(sigma )) * (x_numerator / x_denominator)
    # Intermediate Y value
    # Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2)
    y_numerator = (cos(p_value ) ** 2) * (sin(q_value ) ** 2)
    y_denominator = sin(sigma / 2 ) ** 2
    y_value = (sigma + sin(sigma )) * (y_numerator / y_denominator)
    return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))
if __name__ == "__main__":
import doctest
doctest.testmod()
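    # Illustrative spot check (approximate): San Francisco to Yosemite is
    # roughly 254 km along the ellipsoid.
    print(lamberts_ellipsoidal_distance(37.774856, -122.424227, 37.864742, -119.537521))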
| 369 | """simple docstring"""
import warnings
from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor
_a : Tuple= logging.get_logger(__name__)
class VideoMAEFeatureExtractor( VideoMAEImageProcessor ):
    def __init__(self , *args , **kwargs) -> None:
        warnings.warn(
            'The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use VideoMAEImageProcessor instead.' , FutureWarning , )
        super().__init__(*args , **kwargs)
| 95 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
lowercase_ : str = {
'configuration_mobilenet_v2': [
'MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP',
'MobileNetV2Config',
'MobileNetV2OnnxConfig',
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ : Tuple = ['MobileNetV2FeatureExtractor']
lowercase_ : int = ['MobileNetV2ImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ : Optional[Any] = [
'MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST',
'MobileNetV2ForImageClassification',
'MobileNetV2ForSemanticSegmentation',
'MobileNetV2Model',
'MobileNetV2PreTrainedModel',
'load_tf_weights_in_mobilenet_v2',
]
if TYPE_CHECKING:
    from .configuration_mobilenet_v2 import (
        MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        MobileNetV2Config,
        MobileNetV2OnnxConfig,
    )
    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_mobilenet_v2 import MobileNetV2FeatureExtractor
        from .image_processing_mobilenet_v2 import MobileNetV2ImageProcessor
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mobilenet_v2 import (
            MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST,
            MobileNetV2ForImageClassification,
            MobileNetV2ForSemanticSegmentation,
            MobileNetV2Model,
            MobileNetV2PreTrainedModel,
            load_tf_weights_in_mobilenet_v2,
        )
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 133 |
import importlib
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Union
import torch
from ..utils import BaseOutput
SCHEDULER_CONFIG_NAME = 'scheduler_config.json'
class KarrasDiffusionSchedulers( Enum ):
    DDIMScheduler = 1
    DDPMScheduler = 2
    PNDMScheduler = 3
    LMSDiscreteScheduler = 4
    EulerDiscreteScheduler = 5
    HeunDiscreteScheduler = 6
    EulerAncestralDiscreteScheduler = 7
    DPMSolverMultistepScheduler = 8
    DPMSolverSinglestepScheduler = 9
    KDPM2DiscreteScheduler = 10
    KDPM2AncestralDiscreteScheduler = 11
    DEISMultistepScheduler = 12
    UniPCMultistepScheduler = 13
    DPMSolverSDEScheduler = 14
@dataclass
class SchedulerOutput( BaseOutput ):
    prev_sample: torch.FloatTensor
class SchedulerMixin:
    config_name = SCHEDULER_CONFIG_NAME
    _compatibles = []
    has_compatibles = True
@classmethod
    def from_pretrained( cls , pretrained_model_name_or_path : Dict[str, Any] = None , subfolder : Optional[str] = None , return_unused_kwargs : bool = False , **kwargs , ):
        """simple docstring"""
        config , kwargs , commit_hash = cls.load_config(
            pretrained_model_name_or_path=pretrained_model_name_or_path , subfolder=subfolder , return_unused_kwargs=True , return_commit_hash=True , **kwargs , )
        return cls.from_config(config , return_unused_kwargs=return_unused_kwargs , **kwargs )
    def save_pretrained( self , save_directory : Union[str, os.PathLike] , push_to_hub : bool = False , **kwargs ):
        """simple docstring"""
        self.save_config(save_directory=save_directory , push_to_hub=push_to_hub , **kwargs )
    @property
    def compatibles( self ):
        """simple docstring"""
        return self._get_compatibles()
    @classmethod
    def _get_compatibles( cls ):
        """simple docstring"""
        compatible_classes_str = list(set([cls.__name__] + cls._compatibles ) )
        diffusers_library = importlib.import_module(__name__.split("." )[0] )
        compatible_classes = [
            getattr(diffusers_library , c ) for c in compatible_classes_str if hasattr(diffusers_library , c )
        ]
        return compatible_classes
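# Illustrative usage (a sketch; assumes a concrete scheduler subclass such as
# diffusers' DDIMScheduler, which combines SchedulerMixin with ConfigMixin):
#     scheduler = DDIMScheduler.from_pretrained("some/checkpoint", subfolder="scheduler")
#     scheduler.save_pretrained("./my_scheduler")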
| 133 | 1 |
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
'''nvidia/segformer-b0-finetuned-ade-512-512''': (
'''https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json'''
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class SegformerConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = '''segformer'''
    def __init__( self , num_channels=3 , num_encoder_blocks=4 , depths=[2, 2, 2, 2] , sr_ratios=[8, 4, 2, 1] , hidden_sizes=[32, 64, 160, 256] , patch_sizes=[7, 3, 3, 3] , strides=[4, 2, 2, 2] , num_attention_heads=[1, 2, 5, 8] , mlp_ratios=[4, 4, 4, 4] , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , classifier_dropout_prob=0.1 , initializer_range=0.02 , drop_path_rate=0.1 , layer_norm_eps=1e-6 , decoder_hidden_size=256 , semantic_loss_ignore_index=255 , **kwargs , ) -> Dict:
        """simple docstring"""
        super().__init__(**kwargs)
        if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
            warnings.warn(
                "Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"
                " removed, as the behaviour will default to that of reshape_last_stage = True." , FutureWarning , )
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.reshape_last_stage = kwargs.get("reshape_last_stage" , True)
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class SegformerOnnxConfig( OnnxConfig ):
    '''simple docstring'''
    torch_onnx_minimum_version = version.parse('''1.11''' )
@property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
])
@property
    def atol_for_validation( self ) -> float:
"""simple docstring"""
return 1e-4
@property
    def default_onnx_opset( self ) -> int:
"""simple docstring"""
return 12
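# Illustrative usage (a sketch; the defaults above correspond to the MiT-b0
# encoder layout):
#     config = SegformerConfig()
#     onnx_config = SegformerOnnxConfig(config)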
| 63 | import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self : str) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
_UpperCamelCase = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(lowercase_)
_UpperCamelCase = -1
_UpperCamelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size).to(lowercase_)
_UpperCamelCase = model.generate(lowercase_ , max_new_tokens=10 , do_sample=lowercase_)
_UpperCamelCase = tokenizer.decode(greedy_ids[0])
with CaptureStdout() as cs:
_UpperCamelCase = TextStreamer(lowercase_)
model.generate(lowercase_ , max_new_tokens=10 , do_sample=lowercase_ , streamer=lowercase_)
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
_UpperCamelCase = cs.out[:-1]
self.assertEqual(lowercase_ , lowercase_)
def __UpperCAmelCase ( self : List[str]) -> List[Any]:
"""simple docstring"""
_UpperCamelCase = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
_UpperCamelCase = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(lowercase_)
_UpperCamelCase = -1
_UpperCamelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size).to(lowercase_)
_UpperCamelCase = model.generate(lowercase_ , max_new_tokens=10 , do_sample=lowercase_)
_UpperCamelCase = tokenizer.decode(greedy_ids[0])
_UpperCamelCase = TextIteratorStreamer(lowercase_)
_UpperCamelCase = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
_UpperCamelCase = Thread(target=model.generate , kwargs=lowercase_)
thread.start()
_UpperCamelCase = ""
for new_text in streamer:
streamer_text += new_text
self.assertEqual(lowercase_ , lowercase_)
def __UpperCAmelCase ( self : List[str]) -> List[Any]:
"""simple docstring"""
_UpperCamelCase = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
_UpperCamelCase = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(lowercase_)
_UpperCamelCase = -1
_UpperCamelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size).to(lowercase_)
_UpperCamelCase = model.generate(lowercase_ , max_new_tokens=10 , do_sample=lowercase_)
_UpperCamelCase = greedy_ids[:, input_ids.shape[1] :]
_UpperCamelCase = tokenizer.decode(new_greedy_ids[0])
with CaptureStdout() as cs:
_UpperCamelCase = TextStreamer(lowercase_ , skip_prompt=lowercase_)
model.generate(lowercase_ , max_new_tokens=10 , do_sample=lowercase_ , streamer=lowercase_)
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
_UpperCamelCase = cs.out[:-1]
self.assertEqual(lowercase_ , lowercase_)
def __UpperCAmelCase ( self : Optional[int]) -> List[Any]:
"""simple docstring"""
_UpperCamelCase = AutoTokenizer.from_pretrained("distilgpt2")
_UpperCamelCase = AutoModelForCausalLM.from_pretrained("distilgpt2").to(lowercase_)
_UpperCamelCase = -1
_UpperCamelCase = torch.ones((1, 5) , device=lowercase_).long() * model.config.bos_token_id
with CaptureStdout() as cs:
_UpperCamelCase = TextStreamer(lowercase_ , skip_special_tokens=lowercase_)
model.generate(lowercase_ , max_new_tokens=1 , do_sample=lowercase_ , streamer=lowercase_)
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
_UpperCamelCase = cs.out[:-1] # Remove the final "\n"
_UpperCamelCase = tokenizer(lowercase_ , return_tensors="pt")
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1))
def __UpperCAmelCase ( self : List[str]) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
_UpperCamelCase = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(lowercase_)
_UpperCamelCase = -1
_UpperCamelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size).to(lowercase_)
_UpperCamelCase = TextIteratorStreamer(lowercase_ , timeout=0.0_01)
_UpperCamelCase = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
_UpperCamelCase = Thread(target=model.generate , kwargs=lowercase_)
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(lowercase_):
_UpperCamelCase = ""
for new_text in streamer:
streamer_text += new_text
| 63 | 1 |
'''simple docstring'''
import string
import numpy
def greatest_common_divisor(a: int , b: int ) -> int:
    """simple docstring"""
    return b if a == 0 else greatest_common_divisor(b % a , a )
class HillCipher:
    key_string = string.ascii_uppercase + string.digits
    # This cipher takes alphanumerics into account
    # i.e. a total of 36 characters
    # take x and return x % len(key_string)
    modulus = numpy.vectorize(lambda x : x % 36 )
    to_int = numpy.vectorize(round )
    def __init__( self , encrypt_key ) -> None:
        '''simple docstring'''
        self.encrypt_key = self.modulus(encrypt_key )  # mod36 calc's on the encrypt key
        self.check_determinant()  # validate the determinant of the encryption key
        self.break_key = encrypt_key.shape[0]
    def replace_letters( self , letter ) -> int:
        '''simple docstring'''
        return self.key_string.index(letter )
    def replace_digits( self , num ) -> str:
        '''simple docstring'''
        return self.key_string[round(num )]
    def check_determinant( self ) -> None:
        '''simple docstring'''
        det = round(numpy.linalg.det(self.encrypt_key ) )
        if det < 0:
            det = det % len(self.key_string )
        req_l = len(self.key_string )
        if greatest_common_divisor(det , len(self.key_string ) ) != 1:
            msg = (
                f'determinant modular {req_l} of encryption key({det}) '
                f'is not co prime w.r.t {req_l}.\nTry another key.'
            )
            raise ValueError(msg )
    def process_text( self , text ) -> str:
        '''simple docstring'''
        chars = [char for char in text.upper() if char in self.key_string]
        last = chars[-1]
        while len(chars ) % self.break_key != 0:
            chars.append(last )
        return "".join(chars )
    def encrypt( self , text ) -> str:
        '''simple docstring'''
        text = self.process_text(text.upper() )
        encrypted = ''
        for i in range(0 , len(text ) - self.break_key + 1 , self.break_key ):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char ) for char in batch]
            batch_vec = numpy.array([vec] ).T
            batch_encrypted = self.modulus(self.encrypt_key.dot(batch_vec ) ).T.tolist()[
                0
            ]
            encrypted_batch = ''.join(
                self.replace_digits(num ) for num in batch_encrypted )
            encrypted += encrypted_batch
        return encrypted
def __A ( self ) -> numpy.ndarray:
'''simple docstring'''
__UpperCAmelCase : Dict = round(numpy.linalg.det(self.encrypt_key ) )
if det < 0:
__UpperCAmelCase : int = det % len(self.key_string )
__UpperCAmelCase : List[str] = None
for i in range(len(self.key_string ) ):
if (det * i) % len(self.key_string ) == 1:
__UpperCAmelCase : List[Any] = i
break
__UpperCAmelCase : Dict = (
det_inv
* numpy.linalg.det(self.encrypt_key )
* numpy.linalg.inv(self.encrypt_key )
)
return self.to_int(self.modulus(_a ) )
def __A ( self , __UpperCAmelCase ) -> str:
'''simple docstring'''
__UpperCAmelCase : int = self.make_decrypt_key()
__UpperCAmelCase : Tuple = self.process_text(text.upper() )
__UpperCAmelCase : Union[str, Any] = ''
for i in range(0 , len(_a ) - self.break_key + 1 , self.break_key ):
__UpperCAmelCase : List[str] = text[i : i + self.break_key]
__UpperCAmelCase : str = [self.replace_letters(_a ) for char in batch]
__UpperCAmelCase : List[Any] = numpy.array([vec] ).T
__UpperCAmelCase : Any = self.modulus(decrypt_key.dot(_a ) ).T.tolist()[0]
__UpperCAmelCase : Tuple = ''.join(
self.replace_digits(_a ) for num in batch_decrypted )
decrypted += decrypted_batch
return decrypted
def lowercase_ ( ):
"""simple docstring"""
__UpperCAmelCase : List[Any] = int(input("""Enter the order of the encryption key: """ ) )
__UpperCAmelCase : Tuple = []
print("""Enter each row of the encryption key with space separated integers""" )
for _ in range(_lowerCAmelCase ):
__UpperCAmelCase : Union[str, Any] = [int(_lowerCAmelCase ) for x in input().split()]
hill_matrix.append(_lowerCAmelCase )
__UpperCAmelCase : List[str] = HillCipher(numpy.array(_lowerCAmelCase ) )
print("""Would you like to encrypt or decrypt some text? (1 or 2)""" )
__UpperCAmelCase : str = input("""\n1. Encrypt\n2. Decrypt\n""" )
if option == "1":
__UpperCAmelCase : Optional[int] = input("""What text would you like to encrypt?: """ )
print("""Your encrypted text is:""" )
print(hc.encrypt(_lowerCAmelCase ) )
elif option == "2":
__UpperCAmelCase : List[str] = input("""What text would you like to decrypt?: """ )
print("""Your decrypted text is:""" )
print(hc.decrypt(_lowerCAmelCase ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
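# Usage sketch for the Hill cipher above. Class, attribute and function names in this
# snippet are obfuscated (`_A`, `lowercase_`, `_SCREAMING_SNAKE_CASE`); the calls below
# assume the original, working names from the un-obfuscated source. The ciphertext was
# verified by hand against the mod-36 alphanumeric alphabet (A-Z, then 0-9):
#
#     hill_cipher = HillCipher(numpy.array([[2, 5], [1, 6]]))
#     hill_cipher.encrypt('testing hill cipher')  # -> 'WHXYJOLM9C6XT085LL'
#     hill_cipher.decrypt('WHXYJOLM9C6XT085LL')   # -> 'TESTINGHILLCIPHERR' (padded with final char)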
| 254 |
'''simple docstring'''
def a_ ( _lowerCAmelCase ) -> str:
if not all(char in '01' for char in bin_string ):
raise ValueError('Non-binary value was passed to the function' )
if not bin_string:
raise ValueError('Empty string was passed to the function' )
__lowerCamelCase : int = ''
while len(_lowerCAmelCase ) % 3 != 0:
__lowerCamelCase : str = '0' + bin_string
__lowerCamelCase : Union[str, Any] = [
bin_string[index : index + 3]
for index in range(len(_lowerCAmelCase ) )
if index % 3 == 0
]
for bin_group in bin_string_in_3_list:
__lowerCamelCase : Tuple = 0
for index, val in enumerate(_lowerCAmelCase ):
oct_val += int(2 ** (2 - index) * int(_lowerCAmelCase ) )
oct_string += str(_lowerCAmelCase )
return oct_string
if __name__ == "__main__":
from doctest import testmod
testmod()
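# Worked example for the converter above (assuming the original function name
# `bin_to_octal`): "1111" is left-padded to "001111", split into ["001", "111"],
# and each 3-bit group becomes one octal digit:
#
#     bin_to_octal('1111')          # -> '17'
#     bin_to_octal('101010101010')  # -> '5252'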
| 208 | 0 |
"""simple docstring"""
class _A :
def __init__( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
lowercase = None
lowercase = None
lowercase = graph
self._normalize_graph(__lowerCAmelCase , __lowerCAmelCase )
lowercase = len(__lowerCAmelCase )
lowercase = None
def A__ ( self , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
        if isinstance(sources , int ):
            lowercase = [sources]
        if isinstance(sinks , int ):
            lowercase = [sinks]
if len(__lowerCAmelCase ) == 0 or len(__lowerCAmelCase ) == 0:
return
lowercase = sources[0]
lowercase = sinks[0]
# make fake vertex if there are more
# than one source or sink
if len(__lowerCAmelCase ) > 1 or len(__lowerCAmelCase ) > 1:
lowercase = 0
for i in sources:
max_input_flow += sum(self.graph[i] )
lowercase = len(self.graph ) + 1
for room in self.graph:
room.insert(0 , 0 )
self.graph.insert(0 , [0] * size )
for i in sources:
lowercase = max_input_flow
lowercase = 0
lowercase = len(self.graph ) + 1
for room in self.graph:
room.append(0 )
self.graph.append([0] * size )
for i in sinks:
lowercase = max_input_flow
lowercase = size - 1
def A__ ( self ):
"""simple docstring"""
if self.maximum_flow_algorithm is None:
raise Exception("""You need to set maximum flow algorithm before.""" )
if self.source_index is None or self.sink_index is None:
return 0
self.maximum_flow_algorithm.execute()
return self.maximum_flow_algorithm.getMaximumFlow()
def A__ ( self , __lowerCAmelCase ):
"""simple docstring"""
lowercase = algorithm(self )
class _A :
def __init__( self , __lowerCAmelCase ):
"""simple docstring"""
lowercase = flow_network
lowercase = flow_network.verticesCount
lowercase = flow_network.sourceIndex
lowercase = flow_network.sinkIndex
# it's just a reference, so you shouldn't change
# it in your algorithms, use deep copy before doing that
lowercase = flow_network.graph
lowercase = False
def A__ ( self ):
"""simple docstring"""
if not self.executed:
self._algorithm()
lowercase = True
def A__ ( self ):
"""simple docstring"""
pass
class _A ( lowerCAmelCase ):
def __init__( self , __lowerCAmelCase ):
"""simple docstring"""
super().__init__(__lowerCAmelCase )
# use this to save your result
lowercase = -1
def A__ ( self ):
"""simple docstring"""
if not self.executed:
raise Exception("""You should execute algorithm before using its result!""" )
return self.maximum_flow
class _A ( lowerCAmelCase ):
def __init__( self , __lowerCAmelCase ):
"""simple docstring"""
super().__init__(__lowerCAmelCase )
lowercase = [[0] * self.verticies_count for i in range(self.verticies_count )]
lowercase = [0] * self.verticies_count
lowercase = [0] * self.verticies_count
def A__ ( self ):
"""simple docstring"""
lowercase = self.verticies_count
# push some substance to graph
for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ):
self.preflow[self.source_index][nextvertex_index] += bandwidth
self.preflow[nextvertex_index][self.source_index] -= bandwidth
self.excesses[nextvertex_index] += bandwidth
# Relabel-to-front selection rule
lowercase = [
i
for i in range(self.verticies_count )
if i != self.source_index and i != self.sink_index
]
# move through list
lowercase = 0
while i < len(__lowerCAmelCase ):
lowercase = vertices_list[i]
lowercase = self.heights[vertex_index]
self.process_vertex(__lowerCAmelCase )
if self.heights[vertex_index] > previous_height:
# if it was relabeled, swap elements
# and start from 0 index
vertices_list.insert(0 , vertices_list.pop(__lowerCAmelCase ) )
lowercase = 0
else:
i += 1
lowercase = sum(self.preflow[self.source_index] )
def A__ ( self , __lowerCAmelCase ):
"""simple docstring"""
while self.excesses[vertex_index] > 0:
for neighbour_index in range(self.verticies_count ):
# if it's neighbour and current vertex is higher
if (
self.graph[vertex_index][neighbour_index]
- self.preflow[vertex_index][neighbour_index]
> 0
and self.heights[vertex_index] > self.heights[neighbour_index]
):
self.push(__lowerCAmelCase , __lowerCAmelCase )
self.relabel(__lowerCAmelCase )
def A__ ( self , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
lowercase = min(
self.excesses[from_index] , self.graph[from_index][to_index] - self.preflow[from_index][to_index] , )
self.preflow[from_index][to_index] += preflow_delta
self.preflow[to_index][from_index] -= preflow_delta
self.excesses[from_index] -= preflow_delta
self.excesses[to_index] += preflow_delta
def A__ ( self , __lowerCAmelCase ):
"""simple docstring"""
lowercase = None
for to_index in range(self.verticies_count ):
if (
self.graph[vertex_index][to_index]
- self.preflow[vertex_index][to_index]
> 0
) and (min_height is None or self.heights[to_index] < min_height):
lowercase = self.heights[to_index]
if min_height is not None:
lowercase = min_height + 1
if __name__ == "__main__":
__lowerCAmelCase : int =[0]
__lowerCAmelCase : List[Any] =[3]
# graph = [
# [0, 0, 4, 6, 0, 0],
# [0, 0, 5, 2, 0, 0],
# [0, 0, 0, 0, 4, 4],
# [0, 0, 0, 0, 6, 6],
# [0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0],
# ]
__lowerCAmelCase : Optional[int] =[[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
# prepare our network
__lowerCAmelCase : Tuple =FlowNetwork(graph, entrances, exits)
# set algorithm
flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
# and calculate
__lowerCAmelCase : Optional[int] =flow_network.find_maximum_flow()
print(F"""maximum flow is {maximum_flow}""")
| 32 |
"""simple docstring"""
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
__lowerCAmelCase : List[Any] =numpy.array([0, 0])
__lowerCAmelCase : List[str] =numpy.array([0.5, 0.866_0254])
__lowerCAmelCase : List[Any] =numpy.array([1, 0])
__lowerCAmelCase : int =[VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]
def UpperCAmelCase__ ( lowerCAmelCase__ :list[numpy.ndarray] , lowerCAmelCase__ :int ) -> list[numpy.ndarray]:
'''simple docstring'''
lowercase = initial_vectors
for _ in range(lowerCAmelCase__ ):
lowercase = iteration_step(lowerCAmelCase__ )
return vectors
def UpperCAmelCase__ ( lowerCAmelCase__ :list[numpy.ndarray] ) -> list[numpy.ndarray]:
'''simple docstring'''
lowercase = []
for i, start_vector in enumerate(vectors[:-1] ):
lowercase = vectors[i + 1]
new_vectors.append(lowerCAmelCase__ )
lowercase = end_vector - start_vector
new_vectors.append(start_vector + difference_vector / 3 )
new_vectors.append(
start_vector + difference_vector / 3 + rotate(difference_vector / 3 , 6_0 ) )
new_vectors.append(start_vector + difference_vector * 2 / 3 )
new_vectors.append(vectors[-1] )
return new_vectors
def UpperCAmelCase__ ( lowerCAmelCase__ :numpy.ndarray , lowerCAmelCase__ :float ) -> numpy.ndarray:
'''simple docstring'''
lowercase = numpy.radians(lowerCAmelCase__ )
lowercase , lowercase = numpy.cos(lowerCAmelCase__ ), numpy.sin(lowerCAmelCase__ )
lowercase = numpy.array(((c, -s), (s, c)) )
return numpy.dot(lowerCAmelCase__ , lowerCAmelCase__ )
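# Worked example for the rotation helper above (named `rotate` in the original source):
# rotating the unit vector (1, 0) by 90 degrees yields (0, 1) up to float error, since
# cos(90 degrees) evaluates to ~6.12e-17 in float64:
#
#     rotate(numpy.array([1, 0]), 90)  # -> array([6.123234e-17, 1.000000e+00])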
def UpperCAmelCase__ ( lowerCAmelCase__ :list[numpy.ndarray] ) -> None:
'''simple docstring'''
lowercase = plt.gca()
axes.set_aspect("""equal""" )
# matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
# y-coordinates as inputs, which are constructed from the vector-list using
# zip()
lowercase , lowercase = zip(*lowerCAmelCase__ )
plt.plot(lowerCAmelCase__ , lowerCAmelCase__ )
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
__lowerCAmelCase : Optional[int] =iterate(INITIAL_VECTORS, 5)
plot(processed_vectors)
| 32 | 1 |
import warnings
warnings.warn(
"memory_utils has been reorganized to utils.memory. Import `find_executable_batchsize` from the main `__init__`: "
"`from accelerate import find_executable_batch_size` to avoid this warning.",
FutureWarning,
)
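# Migration sketch: code that previously imported from this shim, e.g.
#     from accelerate.memory_utils import find_executable_batch_size
# should import from the package root instead, as the warning says:
#     from accelerate import find_executable_batch_size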
| 218 |
from manim import *
class __magic_name__ ( lowerCAmelCase_ ):
def __magic_name__ ( self ) -> Any:
'''simple docstring'''
__a =Rectangle(height=0.5 , width=0.5 )
__a =Rectangle(height=0.25 , width=0.25 )
__a =Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
__a =[mem.copy() for i in range(6 )]
__a =[mem.copy() for i in range(6 )]
__a =VGroup(*__snake_case ).arrange(__snake_case , buff=0 )
__a =VGroup(*__snake_case ).arrange(__snake_case , buff=0 )
__a =VGroup(__snake_case , __snake_case ).arrange(__snake_case , buff=0 )
__a =Text('CPU' , font_size=24 )
__a =Group(__snake_case , __snake_case ).arrange(__snake_case , buff=0.5 , aligned_edge=__snake_case )
cpu.move_to([-2.5, -0.5, 0] )
self.add(__snake_case )
__a =[mem.copy() for i in range(4 )]
__a =VGroup(*__snake_case ).arrange(__snake_case , buff=0 )
__a =Text('GPU' , font_size=24 )
__a =Group(__snake_case , __snake_case ).arrange(__snake_case , buff=0.5 , aligned_edge=__snake_case )
gpu.move_to([-1, -1, 0] )
self.add(__snake_case )
__a =[mem.copy() for i in range(6 )]
__a =VGroup(*__snake_case ).arrange(__snake_case , buff=0 )
__a =Text('Model' , font_size=24 )
__a =Group(__snake_case , __snake_case ).arrange(__snake_case , buff=0.5 , aligned_edge=__snake_case )
model.move_to([3, -1.0, 0] )
self.add(__snake_case )
__a =[]
__a =[]
__a =[]
for i, rect in enumerate(__snake_case ):
rect.set_stroke(__snake_case )
__a =Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(__snake_case , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=__snake_case )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(model_cpu_arr[0] , direction=__snake_case , buff=0.0 )
else:
cpu_target.next_to(model_cpu_arr[i - 1] , direction=__snake_case , buff=0.0 )
self.add(__snake_case )
model_cpu_arr.append(__snake_case )
self.add(*__snake_case , *__snake_case , *__snake_case )
__a =[mem.copy() for i in range(6 )]
__a =VGroup(*__snake_case ).arrange(__snake_case , buff=0 )
__a =Text('Loaded Checkpoint' , font_size=24 )
__a =Group(__snake_case , __snake_case ).arrange(__snake_case , buff=0.5 , aligned_edge=__snake_case )
checkpoint.move_to([3, 0.5, 0] )
self.add(__snake_case )
__a =[]
__a =[]
for i, rect in enumerate(__snake_case ):
__a =fill.copy().set_fill(__snake_case , opacity=0.7 )
target.move_to(__snake_case )
ckpt_arr.append(__snake_case )
__a =target.copy()
if i < 5:
cpu_target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.move_to(cpu_right_col_base[i - 5] )
ckpt_cpu_arr.append(__snake_case )
self.add(*__snake_case , *__snake_case )
__a =Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
__a =MarkupText(
f'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(__snake_case , __snake_case )
__a =MarkupText(
f'<span fgcolor=\'{BLUE}\'>●</span> Checkpoint' , font_size=18 , )
blue_text.next_to(__snake_case , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(__snake_case )
__a =MarkupText(
f'Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.' , font_size=24 , )
step_a.move_to([2, 2, 0] )
__a =[meta_mem.copy() for i in range(6 )]
__a =[meta_mem.copy() for i in range(6 )]
__a =VGroup(*__snake_case ).arrange(__snake_case , buff=0 )
__a =VGroup(*__snake_case ).arrange(__snake_case , buff=0 )
__a =VGroup(__snake_case , __snake_case ).arrange(__snake_case , buff=0 )
__a =Text('Disk' , font_size=24 )
__a =Group(__snake_case , __snake_case ).arrange(__snake_case , buff=0.5 , aligned_edge=__snake_case )
disk.move_to([-4.0, -1.25, 0] )
self.play(Write(__snake_case , run_time=3 ) , Write(__snake_case , run_time=1 ) , Create(__snake_case , run_time=1 ) )
__a =[]
for i, rect in enumerate(__snake_case ):
__a =rect.copy()
target.generate_target()
target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
animations.append(MoveToTarget(__snake_case , run_time=1.5 ) )
self.play(*__snake_case )
self.play(FadeOut(__snake_case ) )
__a =MarkupText(f'Then, the checkpoint is removed from memory\nthrough garbage collection.' , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(__snake_case , run_time=3 ) )
self.play(
FadeOut(__snake_case , __snake_case , *__snake_case , *__snake_case ) , )
self.wait()
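# Rendering sketch: with manim installed and the scene class given a real name (the
# name above is obfuscated), this animation would be rendered with something like
#     manim -pql this_file.py <SceneClassName>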
| 218 | 1 |
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__UpperCamelCase = {'''configuration_van''': ['''VAN_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''VanConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase = [
'''VAN_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''VanForImageClassification''',
'''VanModel''',
'''VanPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_van import (
VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
VanForImageClassification,
VanModel,
VanPreTrainedModel,
)
else:
import sys
__UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
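# The `_LazyModule` indirection above keeps importing the package cheap: the heavy
# `modeling_van` submodule is only materialized on first attribute access, e.g.
# (assuming the usual transformers top-level re-export):
#
#     from transformers import VanModel  # resolves VanModel lazily at this point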
| 357 |
"""simple docstring"""
import copy
import re
class UpperCamelCase :
SCREAMING_SNAKE_CASE_ = "hp"
SCREAMING_SNAKE_CASE_ = {}
SCREAMING_SNAKE_CASE_ = None
@classmethod
def a_ ( cls, lowerCAmelCase__, lowerCAmelCase__) -> Tuple:
snake_case_ = prefix
snake_case_ = defaults
cls.build_naming_info()
@staticmethod
def a_ ( lowerCAmelCase__, lowerCAmelCase__) -> Optional[Any]:
if len(lowerCAmelCase__) == 0:
return ""
snake_case_ = None
if any(char.isdigit() for char in word):
raise Exception(f'Parameters should not contain numbers: \'{word}\' contains a number')
if word in info["short_word"]:
return info["short_word"][word]
for prefix_len in range(1, len(lowerCAmelCase__) + 1):
snake_case_ = word[:prefix_len]
if prefix in info["reverse_short_word"]:
continue
else:
snake_case_ = prefix
break
if short_word is None:
# Paranoid fallback
def int_to_alphabetic(lowerCAmelCase__):
snake_case_ = ''
while integer != 0:
snake_case_ = chr(ord('A') + integer % 10) + s
integer //= 10
return s
snake_case_ = 0
while True:
snake_case_ = word + '#' + int_to_alphabetic(lowerCAmelCase__)
if sword in info["reverse_short_word"]:
continue
else:
snake_case_ = sword
break
snake_case_ = short_word
snake_case_ = word
return short_word
@staticmethod
def a_ ( lowerCAmelCase__, lowerCAmelCase__) -> Dict:
snake_case_ = param_name.split('_')
snake_case_ = [TrialShortNamer.shortname_for_word(lowerCAmelCase__, lowerCAmelCase__) for word in words]
# We try to create a separatorless short name, but if there is a collision we have to fallback
# to a separated short name
snake_case_ = ['', '_']
for separator in separators:
snake_case_ = separator.join(lowerCAmelCase__)
if shortname not in info["reverse_short_param"]:
snake_case_ = shortname
snake_case_ = param_name
return shortname
return param_name
@staticmethod
def a_ ( lowerCAmelCase__, lowerCAmelCase__) -> List[Any]:
snake_case_ = TrialShortNamer.shortname_for_key(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = short_name
snake_case_ = param_name
@classmethod
def a_ ( cls) -> List[str]:
if cls.NAMING_INFO is not None:
return
snake_case_ = {
'short_word': {},
'reverse_short_word': {},
'short_param': {},
'reverse_short_param': {},
}
snake_case_ = list(cls.DEFAULTS.keys())
for k in field_keys:
cls.add_new_param_name(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = info
@classmethod
def a_ ( cls, lowerCAmelCase__) -> List[Any]:
cls.build_naming_info()
assert cls.PREFIX is not None
snake_case_ = [copy.copy(cls.PREFIX)]
for k, v in params.items():
if k not in cls.DEFAULTS:
raise Exception(f'You should provide a default value for the param name {k} with value {v}')
if v == cls.DEFAULTS[k]:
# The default value is not added to the name
continue
snake_case_ = cls.NAMING_INFO['short_param'][k]
if isinstance(lowerCAmelCase__, lowerCAmelCase__):
snake_case_ = 1 if v else 0
snake_case_ = '' if isinstance(lowerCAmelCase__, (int, float)) else '-'
snake_case_ = f'{key}{sep}{v}'
name.append(lowerCAmelCase__)
return "_".join(lowerCAmelCase__)
@classmethod
def a_ ( cls, lowerCAmelCase__) -> Optional[Any]:
snake_case_ = repr[len(cls.PREFIX) + 1 :]
if repr == "":
snake_case_ = []
else:
snake_case_ = repr.split('_')
snake_case_ = {}
for value in values:
if "-" in value:
snake_case_ , snake_case_ = value.split('-')
else:
snake_case_ = re.sub('[0-9.]', '', lowerCAmelCase__)
snake_case_ = float(re.sub('[^0-9.]', '', lowerCAmelCase__))
snake_case_ = cls.NAMING_INFO['reverse_short_param'][p_k]
snake_case_ = p_v
for k in cls.DEFAULTS:
if k not in parameters:
snake_case_ = cls.DEFAULTS[k]
return parameters
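# Usage sketch (methods above are obfuscated to `a_`; the original `TrialShortNamer`
# exposes `set_defaults`, `shortname` and `parse_repr`). With a hypothetical subclass:
#
#     class RunNamer(TrialShortNamer):
#         PREFIX = 'run'
#         DEFAULTS = {'learning_rate': 1e-3, 'batch_size': 8}
#
#     RunNamer.shortname({'learning_rate': 1e-3, 'batch_size': 16})  # -> 'run_bs16'
#     RunNamer.parse_repr('run_bs16')  # -> {'batch_size': 16.0, 'learning_rate': 0.001}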
| 312 | 0 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
_a = logging.get_logger(__name__)
_a = '▁'
_a = {'vocab_file': 'sentencepiece.bpe.model'}
_a = {
'vocab_file': {
'facebook/mbart-large-en-ro': (
'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model'
),
'facebook/mbart-large-cc25': (
'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model'
),
}
}
_a = {
'facebook/mbart-large-en-ro': 1_024,
'facebook/mbart-large-cc25': 1_024,
}
# fmt: off
_a = ['ar_AR', 'cs_CZ', 'de_DE', 'en_XX', 'es_XX', 'et_EE', 'fi_FI', 'fr_XX', 'gu_IN', 'hi_IN', 'it_IT', 'ja_XX', 'kk_KZ', 'ko_KR', 'lt_LT', 'lv_LV', 'my_MM', 'ne_NP', 'nl_XX', 'ro_RO', 'ru_RU', 'si_LK', 'tr_TR', 'vi_VN', 'zh_CN']
class A_ (lowercase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE__ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE__ : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE__ : Optional[Any] = ["""input_ids""", """attention_mask"""]
SCREAMING_SNAKE_CASE__ : List[int] = []
SCREAMING_SNAKE_CASE__ : List[int] = []
def __init__( self , lowercase_ , lowercase_="<s>" , lowercase_="</s>" , lowercase_="</s>" , lowercase_="<s>" , lowercase_="<unk>" , lowercase_="<pad>" , lowercase_="<mask>" , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_ = None , lowercase_=None , **lowercase_ , ):
"""simple docstring"""
# Mask token behave like a normal word, i.e. include the space before it
UpperCAmelCase_ : List[Any] = AddedToken(lowercase_ , lstrip=lowercase_ , rstrip=lowercase_ ) if isinstance(lowercase_ , lowercase_ ) else mask_token
UpperCAmelCase_ : Union[str, Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=lowercase_ , eos_token=lowercase_ , unk_token=lowercase_ , sep_token=lowercase_ , cls_token=lowercase_ , pad_token=lowercase_ , mask_token=lowercase_ , tokenizer_file=lowercase_ , src_lang=lowercase_ , tgt_lang=lowercase_ , additional_special_tokens=lowercase_ , sp_model_kwargs=self.sp_model_kwargs , **lowercase_ , )
UpperCAmelCase_ : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(lowercase_ ) )
UpperCAmelCase_ : str = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
        # Mimic fairseq token-to-id alignment for the first 4 tokens
UpperCAmelCase_ : List[str] = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
UpperCAmelCase_ : Tuple = 1
UpperCAmelCase_ : Tuple = len(self.sp_model )
UpperCAmelCase_ : Tuple = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(lowercase_ )
}
UpperCAmelCase_ : int = {v: k for k, v in self.lang_code_to_id.items()}
UpperCAmelCase_ : int = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
UpperCAmelCase_ : Optional[Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
UpperCAmelCase_ : str = list(self.lang_code_to_id.keys() )
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
self._additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in self._additional_special_tokens] )
UpperCAmelCase_ : str = src_lang if src_lang is not None else "en_XX"
UpperCAmelCase_ : Any = self.lang_code_to_id[self._src_lang]
UpperCAmelCase_ : Optional[Any] = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self ):
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = self.__dict__.copy()
UpperCAmelCase_ : List[str] = None
UpperCAmelCase_ : List[Any] = self.sp_model.serialized_model_proto()
return state
def __setstate__( self , lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : List[Any] = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
UpperCAmelCase_ : List[str] = {}
UpperCAmelCase_ : str = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return self._src_lang
@src_lang.setter
def UpperCamelCase__ ( self , lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : str = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def UpperCamelCase__ ( self , lowercase_ , lowercase_ = None , lowercase_ = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowercase_ , token_ids_a=lowercase_ , already_has_special_tokens=lowercase_ )
UpperCAmelCase_ : Optional[Any] = [1] * len(self.prefix_tokens )
UpperCAmelCase_ : Dict = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(lowercase_ )) + suffix_ones
return prefix_ones + ([0] * len(lowercase_ )) + ([0] * len(lowercase_ )) + suffix_ones
def UpperCamelCase__ ( self , lowercase_ , lowercase_ = None ):
"""simple docstring"""
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def UpperCamelCase__ ( self , lowercase_ , lowercase_ = None ):
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = [self.sep_token_id]
UpperCAmelCase_ : Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , **lowercase_ ):
"""simple docstring"""
if src_lang is None or tgt_lang is None:
raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" )
UpperCAmelCase_ : List[str] = src_lang
UpperCAmelCase_ : Any = self(lowercase_ , add_special_tokens=lowercase_ , return_tensors=lowercase_ , **lowercase_ )
UpperCAmelCase_ : Union[str, Any] = self.convert_tokens_to_ids(lowercase_ )
UpperCAmelCase_ : Union[str, Any] = tgt_lang_id
return inputs
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = {self.convert_ids_to_tokens(lowercase_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def UpperCamelCase__ ( self , lowercase_ ):
"""simple docstring"""
return self.sp_model.encode(lowercase_ , out_type=lowercase_ )
def UpperCamelCase__ ( self , lowercase_ ):
"""simple docstring"""
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
UpperCAmelCase_ : Tuple = self.sp_model.PieceToId(lowercase_ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def UpperCamelCase__ ( self , lowercase_ ):
"""simple docstring"""
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def UpperCamelCase__ ( self , lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : str = "".join(lowercase_ ).replace(lowercase_ , " " ).strip()
return out_string
def UpperCamelCase__ ( self , lowercase_ , lowercase_ = None ):
"""simple docstring"""
if not os.path.isdir(lowercase_ ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
UpperCAmelCase_ : Union[str, Any] = os.path.join(
lowercase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowercase_ )
elif not os.path.isfile(self.vocab_file ):
with open(lowercase_ , "wb" ) as fi:
UpperCAmelCase_ : List[Any] = self.sp_model.serialized_model_proto()
fi.write(lowercase_ )
return (out_vocab_file,)
def UpperCamelCase__ ( self , lowercase_ , lowercase_ = "en_XX" , lowercase_ = None , lowercase_ = "ro_RO" , **lowercase_ , ):
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = src_lang
UpperCAmelCase_ : Union[str, Any] = tgt_lang
return super().prepare_seqaseq_batch(lowercase_ , lowercase_ , **lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
return self.set_src_lang_special_tokens(self.src_lang )
def UpperCamelCase__ ( self ):
"""simple docstring"""
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def UpperCamelCase__ ( self , lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : List[str] = self.lang_code_to_id[src_lang]
UpperCAmelCase_ : List[str] = []
UpperCAmelCase_ : Optional[int] = [self.eos_token_id, self.cur_lang_code]
def UpperCamelCase__ ( self , lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : List[str] = self.lang_code_to_id[lang]
UpperCAmelCase_ : Tuple = []
UpperCAmelCase_ : Optional[Any] = [self.eos_token_id, self.cur_lang_code]
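# Usage sketch: the class above is the (obfuscated) MBart tokenizer; with the real
# name `MBartTokenizer` a typical translation setup looks like
#
#     tok = MBartTokenizer.from_pretrained(
#         'facebook/mbart-large-en-ro', src_lang='en_XX', tgt_lang='ro_RO'
#     )
#     batch = tok('UN Chief says there is no military solution in Syria', return_tensors='pt')
#     # per set_src_lang_special_tokens() above, input_ids end with
#     # [..., eos_token_id, lang_code_id('en_XX')]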
| 61 |
from queue import Queue
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
from ..models.auto import AutoTokenizer
class __lowerCAmelCase :
def _lowercase ( self , lowerCAmelCase__ ) -> Optional[Any]:
'''simple docstring'''
raise NotImplementedError()
def _lowercase ( self ) -> int:
'''simple docstring'''
raise NotImplementedError()
class __lowerCAmelCase ( UpperCamelCase__):
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ = False , **lowerCAmelCase__ ) -> int:
'''simple docstring'''
a__ : str =tokenizer
a__ : List[str] =skip_prompt
a__ : List[Any] =decode_kwargs
# variables used in the streaming process
a__ : Dict =[]
a__ : int =0
a__ : str =True
def _lowercase ( self , lowerCAmelCase__ ) -> Union[str, Any]:
'''simple docstring'''
if len(value.shape ) > 1 and value.shape[0] > 1:
raise ValueError("TextStreamer only supports batch size 1" )
elif len(value.shape ) > 1:
a__ : Any =value[0]
if self.skip_prompt and self.next_tokens_are_prompt:
a__ : Dict =False
return
# Add the new token to the cache and decodes the entire thing.
self.token_cache.extend(value.tolist() )
a__ : Union[str, Any] =self.tokenizer.decode(self.token_cache , **self.decode_kwargs )
# After the symbol for a new line, we flush the cache.
if text.endswith("\n" ):
a__ : List[Any] =text[self.print_len :]
a__ : List[str] =[]
a__ : Optional[int] =0
# If the last token is a CJK character, we print the characters.
elif len(lowerCAmelCase__ ) > 0 and self._is_chinese_char(ord(text[-1] ) ):
a__ : List[str] =text[self.print_len :]
self.print_len += len(lowerCAmelCase__ )
# Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words,
# which may change with the subsequent token -- there are probably smarter ways to do this!)
else:
a__ : str =text[self.print_len : text.rfind(" " ) + 1]
self.print_len += len(lowerCAmelCase__ )
self.on_finalized_text(lowerCAmelCase__ )
def _lowercase ( self ) -> Any:
'''simple docstring'''
if len(self.token_cache ) > 0:
a__ : Union[str, Any] =self.tokenizer.decode(self.token_cache , **self.decode_kwargs )
a__ : List[Any] =text[self.print_len :]
a__ : List[str] =[]
a__ : Optional[int] =0
else:
a__ : Union[str, Any] =""
a__ : Any =True
self.on_finalized_text(lowerCAmelCase__ , stream_end=lowerCAmelCase__ )
def _lowercase ( self , lowerCAmelCase__ , lowerCAmelCase__ = False ) -> Optional[Any]:
'''simple docstring'''
print(lowerCAmelCase__ , flush=lowerCAmelCase__ , end="" if not stream_end else None )
def _lowercase ( self , lowerCAmelCase__ ) -> str:
'''simple docstring'''
if (
(cp >= 0X4E_00 and cp <= 0X9F_FF)
or (cp >= 0X34_00 and cp <= 0X4D_BF) #
or (cp >= 0X2_00_00 and cp <= 0X2_A6_DF) #
or (cp >= 0X2_A7_00 and cp <= 0X2_B7_3F) #
or (cp >= 0X2_B7_40 and cp <= 0X2_B8_1F) #
or (cp >= 0X2_B8_20 and cp <= 0X2_CE_AF) #
or (cp >= 0XF9_00 and cp <= 0XFA_FF)
or (cp >= 0X2_F8_00 and cp <= 0X2_FA_1F) #
): #
return True
return False
class __lowerCAmelCase ( UpperCamelCase__):
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ = False , lowerCAmelCase__ = None , **lowerCAmelCase__ ) -> Optional[Any]:
'''simple docstring'''
super().__init__(lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ )
a__ : str =Queue()
a__ : Optional[Any] =None
a__ : Any =timeout
def _lowercase ( self , lowerCAmelCase__ , lowerCAmelCase__ = False ) -> List[str]:
'''simple docstring'''
self.text_queue.put(lowerCAmelCase__ , timeout=self.timeout )
if stream_end:
self.text_queue.put(self.stop_signal , timeout=self.timeout )
def __iter__( self ) -> Dict:
'''simple docstring'''
return self
def _lowercase ( self ) -> int:
'''simple docstring'''
a__ : int =self.text_queue.get(timeout=self.timeout )
if value == self.stop_signal:
raise StopIteration()
else:
return value
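# Usage sketch for the iterator streamer above (real name in transformers:
# `TextIteratorStreamer`); `tokenizer`, `model` and `input_ids` are assumed to exist.
# Generation runs in a background thread while the main thread consumes decoded text:
#
#     from threading import Thread
#     streamer = TextIteratorStreamer(tokenizer)
#     Thread(target=model.generate,
#            kwargs={'input_ids': input_ids, 'max_new_tokens': 20, 'streamer': streamer}).start()
#     for chunk in streamer:
#         print(chunk, end='')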
| 95 | 0 |
'''simple docstring'''
__lowercase : Optional[int] = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
__lowercase : Dict = [{'type': 'code', 'content': INSTALL_CONTENT}]
__lowercase : Union[str, Any] = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
| 294 |
'''simple docstring'''
from .imports import is_rich_available
if is_rich_available():
from rich.traceback import install
install(show_locals=False)
else:
raise ModuleNotFoundError('To use the rich extension, install rich with `pip install rich`')
| 294 | 1 |
'''simple docstring'''
import random
import torch
from huggingface_hub import HfApi
from diffusers import UNetaDModel
lowerCAmelCase_ : List[str] = HfApi()
lowerCAmelCase_ : str = {}
# fmt: off
lowerCAmelCase_ : Any = torch.tensor([
-0.7_515, -1.6_883, 0.2_420, 0.0_300, 0.6_347, 1.3_433, -1.1_743, -3.7_467,
1.2_342, -2.2_485, 0.4_636, 0.8_076, -0.7_991, 0.3_969, 0.8_498, 0.9_189,
-1.8_887, -3.3_522, 0.7_639, 0.2_040, 0.6_271, -2.7_148, -1.6_316, 3.0_839,
0.3_186, 0.2_721, -0.9_759, -1.2_461, 2.6_257, 1.3_557
])
lowerCAmelCase_ : List[Any] = torch.tensor([
-2.3_639, -2.5_344, 0.0_054, -0.6_674, 1.5_990, 1.0_158, 0.3_124, -2.1_436,
1.8_795, -2.5_429, -0.1_566, -0.3_973, 1.2_490, 2.6_447, 1.2_283, -0.5_208,
-2.8_154, -3.5_119, 2.3_838, 1.2_033, 1.7_201, -2.1_256, -1.4_576, 2.7_948,
2.4_204, -0.9_752, -1.2_546, 0.8_027, 3.2_758, 3.1_365
])
lowerCAmelCase_ : int = torch.tensor([
-0.6_531, -0.6_891, -0.3_172, -0.5_375, -0.9_140, -0.5_367, -0.1_175, -0.7_869,
-0.3_808, -0.4_513, -0.2_098, -0.0_083, 0.3_183, 0.5_140, 0.2_247, -0.1_304,
-0.1_302, -0.2_802, -0.2_084, -0.2_025, -0.4_967, -0.4_873, -0.0_861, 0.6_925,
0.0_250, 0.1_290, -0.1_543, 0.6_316, 1.0_460, 1.4_943
])
lowerCAmelCase_ : List[Any] = torch.tensor([
0.0_911, 0.1_107, 0.0_182, 0.0_435, -0.0_805, -0.0_608, 0.0_381, 0.2_172,
-0.0_280, 0.1_327, -0.0_299, -0.0_255, -0.0_050, -0.1_170, -0.1_046, 0.0_309,
0.1_367, 0.1_728, -0.0_533, -0.0_748, -0.0_534, 0.1_624, 0.0_384, -0.1_805,
-0.0_707, 0.0_642, 0.0_220, -0.0_134, -0.1_333, -0.1_505
])
lowerCAmelCase_ : Tuple = torch.tensor([
0.1_321, 0.1_337, 0.0_440, 0.0_622, -0.0_591, -0.0_370, 0.0_503, 0.2_133,
-0.0_177, 0.1_415, -0.0_116, -0.0_112, 0.0_044, -0.0_980, -0.0_789, 0.0_395,
0.1_502, 0.1_785, -0.0_488, -0.0_514, -0.0_404, 0.1_539, 0.0_454, -0.1_559,
-0.0_665, 0.0_659, 0.0_383, -0.0_005, -0.1_266, -0.1_386
])
lowerCAmelCase_ : List[str] = torch.tensor([
0.1_154, 0.1_218, 0.0_307, 0.0_526, -0.0_711, -0.0_541, 0.0_366, 0.2_078,
-0.0_267, 0.1_317, -0.0_226, -0.0_193, -0.0_014, -0.1_055, -0.0_902, 0.0_330,
0.1_391, 0.1_709, -0.0_562, -0.0_693, -0.0_560, 0.1_482, 0.0_381, -0.1_683,
-0.0_681, 0.0_661, 0.0_331, -0.0_046, -0.1_268, -0.1_431
])
lowerCAmelCase_ : Optional[Any] = torch.tensor([
0.1_192, 0.1_240, 0.0_414, 0.0_606, -0.0_557, -0.0_412, 0.0_430, 0.2_042,
-0.0_200, 0.1_385, -0.0_115, -0.0_132, 0.0_017, -0.0_965, -0.0_802, 0.0_398,
0.1_433, 0.1_747, -0.0_458, -0.0_533, -0.0_407, 0.1_545, 0.0_419, -0.1_574,
-0.0_645, 0.0_626, 0.0_341, -0.0_010, -0.1_199, -0.1_390
])
lowerCAmelCase_ : Optional[Any] = torch.tensor([
0.1_075, 0.1_074, 0.0_205, 0.0_431, -0.0_774, -0.0_607, 0.0_298, 0.2_042,
-0.0_320, 0.1_267, -0.0_281, -0.0_250, -0.0_064, -0.1_091, -0.0_946, 0.0_290,
0.1_328, 0.1_650, -0.0_580, -0.0_738, -0.0_586, 0.1_440, 0.0_337, -0.1_746,
-0.0_712, 0.0_605, 0.0_250, -0.0_099, -0.1_316, -0.1_473
])
lowerCAmelCase_ : Optional[int] = torch.tensor([
-1.4_572, -2.0_481, -0.0_414, -0.6_005, 1.4_136, 0.5_848, 0.4_028, -2.7_330,
1.2_212, -2.1_228, 0.2_155, 0.4_039, 0.7_662, 2.0_535, 0.7_477, -0.3_243,
-2.1_758, -2.7_648, 1.6_947, 0.7_026, 1.2_338, -1.6_078, -0.8_682, 2.2_810,
1.8_574, -0.5_718, -0.5_586, -0.0_186, 2.3_415, 2.1_251])
lowerCAmelCase_ : List[Any] = torch.tensor([
-1.3_690, -1.9_720, -0.4_090, -0.6_966, 1.4_660, 0.9_938, -0.1_385, -2.7_324,
0.7_736, -1.8_917, 0.2_923, 0.4_293, 0.1_693, 1.4_112, 1.1_887, -0.3_181,
-2.2_160, -2.6_381, 1.3_170, 0.8_163, 0.9_240, -1.6_544, -0.6_099, 2.5_259,
1.6_430, -0.9_090, -0.9_392, -0.0_126, 2.4_268, 2.3_266
])
lowerCAmelCase_ : Tuple = torch.tensor([
-1.3_525, -1.9_628, -0.3_956, -0.6_860, 1.4_664, 1.0_014, -0.1_259, -2.7_212,
0.7_772, -1.8_811, 0.2_996, 0.4_388, 0.1_704, 1.4_029, 1.1_701, -0.3_027,
-2.2_053, -2.6_287, 1.3_350, 0.8_131, 0.9_274, -1.6_292, -0.6_098, 2.5_131,
1.6_505, -0.8_958, -0.9_298, -0.0_151, 2.4_257, 2.3_355
])
lowerCAmelCase_ : str = torch.tensor([
-2.0_585, -2.7_897, -0.2_850, -0.8_940, 1.9_052, 0.5_702, 0.6_345, -3.8_959,
1.5_932, -3.2_319, 0.1_974, 0.0_287, 1.7_566, 2.6_543, 0.8_387, -0.5_351,
-3.2_736, -4.3_375, 2.9_029, 1.6_390, 1.4_640, -2.1_701, -1.9_013, 2.9_341,
3.4_981, -0.6_255, -1.1_644, -0.1_591, 3.7_097, 3.2_066
])
lowerCAmelCase_ : int = torch.tensor([
-2.3_139, -2.5_594, -0.0_197, -0.6_785, 1.7_001, 1.1_606, 0.3_075, -2.1_740,
1.8_071, -2.5_630, -0.0_926, -0.3_811, 1.2_116, 2.6_246, 1.2_731, -0.5_398,
-2.8_153, -3.6_140, 2.3_893, 1.3_262, 1.6_258, -2.1_856, -1.3_267, 2.8_395,
2.3_779, -1.0_623, -1.2_468, 0.8_959, 3.3_367, 3.2_243
])
lowerCAmelCase_ : Union[str, Any] = torch.tensor([
-2.0_628, -2.7_667, -0.2_089, -0.8_263, 2.0_539, 0.5_992, 0.6_495, -3.8_336,
1.6_025, -3.2_817, 0.1_721, -0.0_633, 1.7_516, 2.7_039, 0.8_100, -0.5_908,
-3.2_113, -4.4_343, 2.9_257, 1.3_632, 1.5_562, -2.1_489, -1.9_894, 3.0_560,
3.3_396, -0.7_328, -1.0_417, 0.0_383, 3.7_093, 3.2_343
])
lowerCAmelCase_ : List[str] = torch.tensor([
-1.4_574, -2.0_569, -0.0_473, -0.6_117, 1.4_018, 0.5_769, 0.4_129, -2.7_344,
1.2_241, -2.1_397, 0.2_000, 0.3_937, 0.7_616, 2.0_453, 0.7_324, -0.3_391,
-2.1_746, -2.7_744, 1.6_963, 0.6_921, 1.2_187, -1.6_172, -0.8_877, 2.2_439,
1.8_471, -0.5_839, -0.5_605, -0.0_464, 2.3_250, 2.1_219
])
# fmt: on
lowerCAmelCase_ : Any = api.list_models(filter='diffusers')
for mod in models:
if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
lowerCAmelCase_ : Union[str, Any] = '/home/patrick/google_checkpoints/' + mod.modelId.split('/')[-1]
print(f"""Started running {mod.modelId}!!!""")
if mod.modelId.startswith('CompVis'):
lowerCAmelCase_ : Dict = UNetaDModel.from_pretrained(local_checkpoint, subfolder='unet')
else:
lowerCAmelCase_ : int = UNetaDModel.from_pretrained(local_checkpoint)
torch.manual_seed(0)
random.seed(0)
lowerCAmelCase_ : Optional[int] = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
lowerCAmelCase_ : str = torch.tensor([10] * noise.shape[0])
with torch.no_grad():
lowerCAmelCase_ : Optional[Any] = model(noise, time_step).sample
assert torch.allclose(
logits[0, 0, 0, :30], results['_'.join('_'.join(mod.modelId.split('/')).split('-'))], atol=1e-3
)
print(f"""{mod.modelId} has passed successfully!!!""")
| 63 |
'''simple docstring'''
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
@register_to_config
def __init__( self : Dict , *,
__a : int = 4 , __a : int = 7_68 , __a : int , __a : int , ):
super().__init__()
_a = nn.Parameter(torch.zeros(__a ) )
# parameters for additional clip time embeddings
_a = nn.Linear(__a , __a )
_a = nn.Linear(__a , __a )
# parameters for encoder hidden states
_a = clip_extra_context_tokens
_a = nn.Linear(
__a , self.clip_extra_context_tokens * cross_attention_dim )
_a = nn.Linear(__a , __a )
_a = nn.LayerNorm(__a )
def UpperCamelCase__ ( self : Optional[Any] , *, __a : Tuple , __a : Union[str, Any] , __a : Any , __a : List[Any] ):
if do_classifier_free_guidance:
# Add the classifier free guidance embeddings to the image embeddings
_a = image_embeddings.shape[0]
_a = self.learned_classifier_free_guidance_embeddings.unsqueeze(0 )
_a = classifier_free_guidance_embeddings.expand(
__a , -1 )
_a = torch.cat([classifier_free_guidance_embeddings, image_embeddings] , dim=0 )
# The image embeddings batch size and the text embeddings batch size are equal
assert image_embeddings.shape[0] == prompt_embeds.shape[0]
_a = prompt_embeds.shape[0]
# "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
# adding CLIP embeddings to the existing timestep embedding, ...
_a = self.embedding_proj(__a )
_a = self.clip_image_embeddings_project_to_time_embeddings(__a )
_a = time_projected_image_embeddings + time_projected_prompt_embeds
# ... and by projecting CLIP embeddings into four
# extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
_a = self.clip_extra_context_tokens_proj(__a )
_a = clip_extra_context_tokens.reshape(__a , -1 , self.clip_extra_context_tokens )
_a = clip_extra_context_tokens.permute(0 , 2 , 1 )
_a = self.encoder_hidden_states_proj(__a )
_a = self.text_encoder_hidden_states_norm(__a )
_a = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states] , dim=1 )
return text_encoder_hidden_states, additive_clip_time_embeddings
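# Shape sketch for the forward pass above (hypothetical sizes): with batch size B,
# clip_extra_context_tokens = 4 and cross_attention_dim = 768, the module returns
#     text_encoder_hidden_states:    (B, 4 + text_seq_len, 768)
#     additive_clip_time_embeddings: (B, time_embed_dim)
# where the 4 extra context tokens are projected from the CLIP image embedding and
# prepended to the projected, layer-normed text encoder hidden states.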
| 63 | 1 |
'''simple docstring'''
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class UpperCAmelCase_ :
'''simple docstring'''
def __init__( self , _lowercase , _lowercase=13 , _lowercase=7 , _lowercase=True , _lowercase=True , _lowercase=True , _lowercase=True , _lowercase=99 , _lowercase=32 , _lowercase=2 , _lowercase=4 , _lowercase=37 , _lowercase="gelu" , _lowercase=0.1 , _lowercase=0.1 , _lowercase=512 , _lowercase=16 , _lowercase=2 , _lowercase=0.02 , _lowercase=3 , _lowercase=4 , _lowercase=None , ):
"""simple docstring"""
_lowerCAmelCase = parent
_lowerCAmelCase = 13
_lowerCAmelCase = 7
_lowerCAmelCase = True
_lowerCAmelCase = True
_lowerCAmelCase = True
_lowerCAmelCase = True
_lowerCAmelCase = 99
_lowerCAmelCase = 384
_lowerCAmelCase = 2
_lowerCAmelCase = 4
_lowerCAmelCase = 37
_lowerCAmelCase = """gelu"""
_lowerCAmelCase = 0.1
_lowerCAmelCase = 0.1
_lowerCAmelCase = 512
_lowerCAmelCase = 16
_lowerCAmelCase = 2
_lowerCAmelCase = 0.02
_lowerCAmelCase = 3
_lowerCAmelCase = 4
_lowerCAmelCase = 128
_lowerCAmelCase = 2
_lowerCAmelCase = 9
_lowerCAmelCase = 1
_lowerCAmelCase = None
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCAmelCase = None
if self.use_input_mask:
_lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
_lowerCAmelCase = None
if self.use_token_type_ids:
_lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_lowerCAmelCase = None
_lowerCAmelCase = None
_lowerCAmelCase = None
if self.use_labels:
_lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
_lowerCAmelCase = ConvBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=_lowercase , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowercase ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ):
"""simple docstring"""
_lowerCAmelCase = TFConvBertModel(config=_lowercase )
_lowerCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
_lowerCAmelCase = [input_ids, input_mask]
_lowerCAmelCase = model(_lowercase )
_lowerCAmelCase = model(_lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowercase ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ):
"""simple docstring"""
_lowerCAmelCase = TFConvBertForMaskedLM(config=_lowercase )
_lowerCAmelCase = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
_lowerCAmelCase = model(_lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowercase ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ):
"""simple docstring"""
_lowerCAmelCase = self.num_labels
_lowerCAmelCase = TFConvBertForSequenceClassification(config=_lowercase )
_lowerCAmelCase = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
_lowerCAmelCase = model(_lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowercase ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ):
"""simple docstring"""
_lowerCAmelCase = self.num_choices
_lowerCAmelCase = TFConvBertForMultipleChoice(config=_lowercase )
_lowerCAmelCase = tf.tile(tf.expand_dims(_lowercase , 1 ) , (1, self.num_choices, 1) )
_lowerCAmelCase = tf.tile(tf.expand_dims(_lowercase , 1 ) , (1, self.num_choices, 1) )
_lowerCAmelCase = tf.tile(tf.expand_dims(_lowercase , 1 ) , (1, self.num_choices, 1) )
_lowerCAmelCase = {
"""input_ids""": multiple_choice_inputs_ids,
"""attention_mask""": multiple_choice_input_mask,
"""token_type_ids""": multiple_choice_token_type_ids,
}
_lowerCAmelCase = model(_lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _lowercase ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ):
"""simple docstring"""
_lowerCAmelCase = self.num_labels
_lowerCAmelCase = TFConvBertForTokenClassification(config=_lowercase )
_lowerCAmelCase = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
_lowerCAmelCase = model(_lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _lowercase ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ):
"""simple docstring"""
_lowerCAmelCase = TFConvBertForQuestionAnswering(config=_lowercase )
_lowerCAmelCase = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
_lowerCAmelCase = model(_lowercase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.prepare_config_and_inputs()
        _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = config_and_inputs
_lowerCAmelCase = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_tf
class UpperCAmelCase_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
_lowercase : Union[str, Any] = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
_lowercase : str = (
{
'''feature-extraction''': TFConvBertModel,
'''fill-mask''': TFConvBertForMaskedLM,
'''question-answering''': TFConvBertForQuestionAnswering,
'''text-classification''': TFConvBertForSequenceClassification,
'''token-classification''': TFConvBertForTokenClassification,
'''zero-shot''': TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
_lowercase : Optional[Any] = False
_lowercase : Dict = False
_lowercase : Any = False
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = TFConvBertModelTester(self )
_lowerCAmelCase = ConfigTester(self , config_class=_lowercase , hidden_size=37 )
def _lowercase ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowercase )
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_lowercase )
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*_lowercase )
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_lowercase )
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_lowercase )
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_lowercase )
@slow
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase = True
_lowerCAmelCase = True
if hasattr(_lowercase , """use_cache""" ):
_lowerCAmelCase = True
_lowerCAmelCase = getattr(self.model_tester , """encoder_seq_length""" , self.model_tester.seq_length )
_lowerCAmelCase = getattr(self.model_tester , """key_length""" , _lowercase )
for model_class in self.all_model_classes:
_lowerCAmelCase = self._prepare_for_class(_lowercase , _lowercase )
_lowerCAmelCase = model_class(_lowercase )
_lowerCAmelCase = len(model(_lowercase ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_lowercase , saved_model=_lowercase )
_lowerCAmelCase = os.path.join(_lowercase , """saved_model""" , """1""" )
_lowerCAmelCase = tf.keras.models.load_model(_lowercase )
_lowerCAmelCase = model(_lowercase )
if self.is_encoder_decoder:
_lowerCAmelCase = outputs["""encoder_hidden_states"""]
_lowerCAmelCase = outputs["""encoder_attentions"""]
else:
_lowerCAmelCase = outputs["""hidden_states"""]
_lowerCAmelCase = outputs["""attentions"""]
self.assertEqual(len(_lowercase ) , _lowercase )
_lowerCAmelCase = getattr(
self.model_tester , """expected_num_hidden_layers""" , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(_lowercase ) , _lowercase )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(_lowercase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
@slow
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = TFConvBertModel.from_pretrained("""YituTech/conv-bert-base""" )
self.assertIsNotNone(_lowercase )
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase = True
_lowerCAmelCase = getattr(self.model_tester , """decoder_seq_length""" , self.model_tester.seq_length )
_lowerCAmelCase = getattr(self.model_tester , """encoder_seq_length""" , self.model_tester.seq_length )
_lowerCAmelCase = getattr(self.model_tester , """key_length""" , _lowercase )
_lowerCAmelCase = getattr(self.model_tester , """key_length""" , _lowercase )
def check_decoder_attentions_output(_lowercase ):
_lowerCAmelCase = len(_lowercase )
self.assertEqual(out_len % 2 , 0 )
_lowerCAmelCase = outputs.decoder_attentions
self.assertEqual(len(_lowercase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
def check_encoder_attentions_output(_lowercase ):
_lowerCAmelCase = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(_lowercase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
for model_class in self.all_model_classes:
_lowerCAmelCase = True
_lowerCAmelCase = False
_lowerCAmelCase = model_class(_lowercase )
_lowerCAmelCase = model(self._prepare_for_class(_lowercase , _lowercase ) )
_lowerCAmelCase = len(_lowercase )
self.assertEqual(config.output_hidden_states , _lowercase )
check_encoder_attentions_output(_lowercase )
if self.is_encoder_decoder:
_lowerCAmelCase = model_class(_lowercase )
_lowerCAmelCase = model(self._prepare_for_class(_lowercase , _lowercase ) )
self.assertEqual(config.output_hidden_states , _lowercase )
check_decoder_attentions_output(_lowercase )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
_lowerCAmelCase = True
_lowerCAmelCase = model_class(_lowercase )
_lowerCAmelCase = model(self._prepare_for_class(_lowercase , _lowercase ) )
self.assertEqual(config.output_hidden_states , _lowercase )
check_encoder_attentions_output(_lowercase )
# Check attention is always last and order is fine
_lowerCAmelCase = True
_lowerCAmelCase = True
_lowerCAmelCase = model_class(_lowercase )
_lowerCAmelCase = model(self._prepare_for_class(_lowercase , _lowercase ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(_lowercase ) )
self.assertEqual(model.config.output_hidden_states , _lowercase )
check_encoder_attentions_output(_lowercase )
@require_tf
class UpperCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = TFConvBertModel.from_pretrained("""YituTech/conv-bert-base""" )
_lowerCAmelCase = tf.constant([[0, 1, 2, 3, 4, 5]] )
_lowerCAmelCase = model(_lowercase )[0]
_lowerCAmelCase = [1, 6, 768]
self.assertEqual(output.shape , _lowercase )
_lowerCAmelCase = tf.constant(
[
[
[-0.0347_5493, -0.468_6034, -0.3063_8832],
[0.2263_7248, -0.2698_8646, -0.742_3424],
[0.1032_4868, -0.4501_3508, -0.5828_0784],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , _lowercase , atol=1e-4 )
| 229 |
'''simple docstring'''
import heapq as hq
import math
from collections.abc import Iterator
class Vertex :
    '''simple docstring'''

    def __init__( self , id_ ):
        """simple docstring"""
        self.id = str(id_ )
        self.key = None
        self.pi = None
        self.neighbors = []
        self.edges = {}  # {vertex:distance}

    def __lt__( self , other ):
        """simple docstring"""
        return self.key < other.key

    def __repr__( self ):
        """simple docstring"""
        return self.id

    def add_neighbor( self , vertex ):
        """simple docstring"""
        self.neighbors.append(vertex )

    def add_edge( self , vertex , weight ):
        """simple docstring"""
        self.edges[vertex.id] = weight


def connect (graph , a , b , edge ):
    # add the neighbors:
    graph[a - 1].add_neighbor(graph[b - 1] )
    graph[b - 1].add_neighbor(graph[a - 1] )
    # add the edges:
    graph[a - 1].add_edge(graph[b - 1] , edge )
    graph[b - 1].add_edge(graph[a - 1] , edge )


def prim (graph :list , root :Vertex ):
    # Prim's algorithm, O(V^2) list-scan variant; returns the MST parent edges.
    a = []
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    q = graph[:]
    while q:
        u = min(q )
        q.remove(u )
        for v in u.neighbors:
            if (v in q) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
    for i in range(1 , len(graph ) ):
        a.append((int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1) )
    return a


def prim_heap (graph :list , root :Vertex ):
    # Prim's algorithm with a binary heap (hq is heapq); yields MST parent edges.
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    h = list(graph )
    hq.heapify(h )
    while h:
        u = hq.heappop(h )
        for v in u.neighbors:
            if (v in h) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
                hq.heapify(h )
    for i in range(1 , len(graph ) ):
        yield (int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1)


def test_vector () -> None:
    pass
if __name__ == "__main__":
import doctest
doctest.testmod()
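    # --- Hypothetical usage of the fixed functions above: build a small
    # --- weighted graph with 1-based vertex ids and print the MST parent
    # --- edges found by both variants.
    graph = [Vertex(x ) for x in range(1 , 6 )]
    connect(graph , 1 , 2 , 15 )
    connect(graph , 1 , 3 , 12 )
    connect(graph , 2 , 4 , 13 )
    connect(graph , 2 , 5 , 5 )
    connect(graph , 3 , 4 , 6 )
    connect(graph , 4 , 5 , 9 )
    print(prim(graph , graph[0] ) )
    print(list(prim_heap(graph , graph[0] ) ) )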
| 229 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_altclip': [
'ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'AltCLIPConfig',
'AltCLIPTextConfig',
'AltCLIPVisionConfig',
],
'processing_altclip': ['AltCLIPProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_altclip'] = [
'ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'AltCLIPPreTrainedModel',
'AltCLIPModel',
'AltCLIPTextModel',
'AltCLIPVisionModel',
]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
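# --- For contrast, a minimal sketch of lazy attribute loading with PEP 562
# --- module-level __getattr__ instead of _LazyModule; the target map below is
# --- hypothetical, not part of the real package.
import importlib

_lazy_targets = {"AltCLIPProcessor": ".processing_altclip"}

def __getattr__(name):
    if name in _lazy_targets:
        module = importlib.import_module(_lazy_targets[name], __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")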
| 32 |
from __future__ import annotations
import json
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
headers = {'UserAgent': UserAgent().random}
def extract_user_profile( script ) -> dict:
    """simple docstring"""
    data = script.contents[0]
    info = json.loads(data[data.find('{"config"' ) : -1] )
    return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]


class InstagramUser:
    def __init__( self , username : str ) -> None:
        self.url = F"""https://www.instagram.com/{username}/"""
        self.user_data = self.get_json()

    def get_json( self ) -> dict:
        html = requests.get(self.url , headers=headers ).text
        scripts = BeautifulSoup(html , 'html.parser' ).find_all('script' )
        try:
            return extract_user_profile(scripts[4] )
        except (json.decoder.JSONDecodeError, KeyError):
            return extract_user_profile(scripts[3] )
def __repr__( self : Union[str, Any] ) -> str:
return F"""{self.__class__.__name__}('{self.username}')"""
def __str__( self : Optional[int] ) -> str:
return F"""{self.fullname} ({self.username}) is {self.biography}"""
    @property
    def username( self ) -> str:
        return self.user_data["username"]

    @property
    def fullname( self ) -> str:
        return self.user_data["full_name"]

    @property
    def biography( self ) -> str:
        return self.user_data["biography"]

    @property
    def email( self ) -> str:
        return self.user_data["business_email"]

    @property
    def website( self ) -> str:
        return self.user_data["external_url"]

    @property
    def number_of_followers( self ) -> int:
        return self.user_data["edge_followed_by"]["count"]

    @property
    def number_of_followings( self ) -> int:
        return self.user_data["edge_follow"]["count"]

    @property
    def number_of_posts( self ) -> int:
        return self.user_data["edge_owner_to_timeline_media"]["count"]

    @property
    def profile_picture_url( self ) -> str:
        return self.user_data["profile_pic_url_hd"]

    @property
    def is_verified( self ) -> bool:
        return self.user_data["is_verified"]

    @property
    def is_private( self ) -> bool:
        return self.user_data["is_private"]
def test_instagram_user( username : str = "github" ) -> None:
    """simple docstring"""
    import os

    if os.environ.get('CI' ):
        return  # test failing on GitHub Actions
    instagram_user = InstagramUser(username )
    assert instagram_user.user_data
    assert isinstance(instagram_user.user_data , dict )
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 1_50
assert instagram_user.number_of_followers > 12_00_00
assert instagram_user.number_of_followings > 15
assert instagram_user.email == "[email protected]"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith('https://instagram.' )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
    instagram_user = InstagramUser('github')
print(instagram_user)
print(F'{instagram_user.number_of_posts = }')
print(F'{instagram_user.number_of_followers = }')
print(F'{instagram_user.number_of_followings = }')
print(F'{instagram_user.email = }')
print(F'{instagram_user.website = }')
print(F'{instagram_user.profile_picture_url = }')
print(F'{instagram_user.is_verified = }')
print(F'{instagram_user.is_private = }')
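    # --- An offline sanity check of extract_user_profile above, built from
    # --- fabricated HTML so it needs no network access.
    fake_html = (
        '<html><script>window._sharedData = {"config": 1, "entry_data": '
        '{"ProfilePage": [{"graphql": {"user": {"username": "github"}}}]}};</script></html>'
    )
    fake_script = BeautifulSoup(fake_html , 'html.parser' ).find('script' )
    assert extract_user_profile(fake_script )["username"] == "github"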
| 32 | 1 |
'''simple docstring'''
def selection_sort( collection ) -> list:
    # Selection sort: repeatedly swap the smallest remaining element into place.
    length : int = len(collection )
    for i in range(length - 1 ):
        least : int = i
        for k in range(i + 1 , length ):
            if collection[k] < collection[least]:
                least = k
        if least != i:
            collection[least], collection[i] = (collection[i], collection[least])
    return collection
if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
print(selection_sort(unsorted))
| 363 |
'''simple docstring'''
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .tokenization_wavaveca import WavaVecaCTCTokenizer
class a__ ( lowerCamelCase_ ):
_SCREAMING_SNAKE_CASE : int = 'Wav2Vec2FeatureExtractor'
_SCREAMING_SNAKE_CASE : List[str] = 'AutoTokenizer'
def __init__( self , _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
super().__init__(_UpperCamelCase , _UpperCamelCase )
_lowercase : List[Any] = self.feature_extractor
_lowercase : Optional[Any] = False
@classmethod
def _lowerCamelCase ( cls , _UpperCamelCase , **_UpperCamelCase ):
"""simple docstring"""
try:
return super().from_pretrained(_UpperCamelCase , **_UpperCamelCase )
except OSError:
warnings.warn(
f'''Loading a tokenizer inside {cls.__name__} from a config that does not'''
" include a `tokenizer_class` attribute is deprecated and will be "
"removed in v5. Please add `'tokenizer_class': 'Wav2Vec2CTCTokenizer'`"
" attribute to either your `config.json` or `tokenizer_config.json` "
"file to suppress this warning: " , _UpperCamelCase , )
_lowercase : Dict = WavaVecaFeatureExtractor.from_pretrained(_UpperCamelCase , **_UpperCamelCase )
_lowercase : str = WavaVecaCTCTokenizer.from_pretrained(_UpperCamelCase , **_UpperCamelCase )
return cls(feature_extractor=_UpperCamelCase , tokenizer=_UpperCamelCase )
def __call__( self , *_UpperCamelCase , **_UpperCamelCase ):
"""simple docstring"""
if self._in_target_context_manager:
return self.current_processor(*_UpperCamelCase , **_UpperCamelCase )
if "raw_speech" in kwargs:
warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead." )
_lowercase : int = kwargs.pop("raw_speech" )
else:
_lowercase : List[Any] = kwargs.pop("audio" , _UpperCamelCase )
_lowercase : List[Any] = kwargs.pop("sampling_rate" , _UpperCamelCase )
_lowercase : Union[str, Any] = kwargs.pop("text" , _UpperCamelCase )
if len(_UpperCamelCase ) > 0:
_lowercase : int = args[0]
_lowercase : Any = args[1:]
if audio is None and text is None:
raise ValueError("You need to specify either an `audio` or `text` input to process." )
if audio is not None:
_lowercase : Dict = self.feature_extractor(_UpperCamelCase , *_UpperCamelCase , sampling_rate=_UpperCamelCase , **_UpperCamelCase )
if text is not None:
_lowercase : Union[str, Any] = self.tokenizer(_UpperCamelCase , **_UpperCamelCase )
if text is None:
return inputs
elif audio is None:
return encodings
else:
_lowercase : int = encodings["input_ids"]
return inputs
def _lowerCamelCase ( self , *_UpperCamelCase , **_UpperCamelCase ):
"""simple docstring"""
if self._in_target_context_manager:
return self.current_processor.pad(*_UpperCamelCase , **_UpperCamelCase )
_lowercase : List[Any] = kwargs.pop("input_features" , _UpperCamelCase )
_lowercase : Any = kwargs.pop("labels" , _UpperCamelCase )
if len(_UpperCamelCase ) > 0:
_lowercase : Any = args[0]
_lowercase : Any = args[1:]
if input_features is not None:
_lowercase : Any = self.feature_extractor.pad(_UpperCamelCase , *_UpperCamelCase , **_UpperCamelCase )
if labels is not None:
_lowercase : int = self.tokenizer.pad(_UpperCamelCase , **_UpperCamelCase )
if labels is None:
return input_features
elif input_features is None:
return labels
else:
_lowercase : Optional[Any] = labels["input_ids"]
return input_features
def _lowerCamelCase ( self , *_UpperCamelCase , **_UpperCamelCase ):
"""simple docstring"""
return self.tokenizer.batch_decode(*_UpperCamelCase , **_UpperCamelCase )
def _lowerCamelCase ( self , *_UpperCamelCase , **_UpperCamelCase ):
"""simple docstring"""
return self.tokenizer.decode(*_UpperCamelCase , **_UpperCamelCase )
@contextmanager
def _lowerCamelCase ( self ):
"""simple docstring"""
warnings.warn(
"`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
"labels by using the argument `text` of the regular `__call__` method (either in the same call as "
"your audio inputs, or in a separate call." )
_lowercase : Optional[Any] = True
_lowercase : Dict = self.tokenizer
yield
_lowercase : List[str] = self.feature_extractor
_lowercase : List[str] = False
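# --- A minimal sketch of the audio/text dispatch implemented by the processor
# --- above; the two callables are hypothetical stand-ins for the real feature
# --- extractor and tokenizer.
class MiniProcessor:
    def __init__(self, feature_extractor, tokenizer):
        self.feature_extractor = feature_extractor
        self.tokenizer = tokenizer

    def __call__(self, audio=None, text=None):
        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")
        inputs = self.feature_extractor(audio) if audio is not None else None
        encodings = self.tokenizer(text) if text is not None else None
        if text is None:
            return inputs
        if audio is None:
            return encodings
        inputs["labels"] = encodings["input_ids"]  # attach tokenized text as labels
        return inputs


if __name__ == "__main__":
    proc = MiniProcessor(lambda a: {"input_values": a}, lambda t: {"input_ids": [ord(c) for c in t]})
    assert proc(audio=[0.1, 0.2], text="hi")["labels"] == [104, 105]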
| 199 | 0 |
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class A :
@property
def lowercase_ (self : Tuple ) -> str:
"""simple docstring"""
return self.get_dummy_input()
@property
def lowercase_ (self : str ) -> Tuple:
"""simple docstring"""
if self.block_type == "down":
return (4, 3_2, 1_6, 1_6)
elif self.block_type == "mid":
return (4, 3_2, 3_2, 3_2)
elif self.block_type == "up":
return (4, 3_2, 6_4, 6_4)
raise ValueError(f"""'{self.block_type}' is not a supported block_type. Set it to 'up', 'mid', or 'down'.""" )
def lowercase_ (self : Dict , __UpperCAmelCase : Optional[int]=True , __UpperCAmelCase : List[Any]=False , __UpperCAmelCase : str=False , __UpperCAmelCase : Optional[Any]=False , ) -> Any:
"""simple docstring"""
UpperCAmelCase__ = 4
UpperCAmelCase__ = 3_2
UpperCAmelCase__ = (3_2, 3_2)
UpperCAmelCase__ = torch.manual_seed(0 )
UpperCAmelCase__ = torch.device(__UpperCAmelCase )
UpperCAmelCase__ = (batch_size, num_channels) + sizes
UpperCAmelCase__ = randn_tensor(__UpperCAmelCase , generator=__UpperCAmelCase , device=__UpperCAmelCase )
UpperCAmelCase__ = {"hidden_states": hidden_states}
if include_temb:
UpperCAmelCase__ = 1_2_8
UpperCAmelCase__ = randn_tensor((batch_size, temb_channels) , generator=__UpperCAmelCase , device=__UpperCAmelCase )
if include_res_hidden_states_tuple:
UpperCAmelCase__ = torch.manual_seed(1 )
UpperCAmelCase__ = (randn_tensor(__UpperCAmelCase , generator=__UpperCAmelCase , device=__UpperCAmelCase ),)
if include_encoder_hidden_states:
UpperCAmelCase__ = floats_tensor((batch_size, 3_2, 3_2) ).to(__UpperCAmelCase )
if include_skip_sample:
UpperCAmelCase__ = randn_tensor(((batch_size, 3) + sizes) , generator=__UpperCAmelCase , device=__UpperCAmelCase )
return dummy_input
def lowercase_ (self : Optional[int] ) -> int:
"""simple docstring"""
UpperCAmelCase__ = {
"in_channels": 3_2,
"out_channels": 3_2,
"temb_channels": 1_2_8,
}
if self.block_type == "up":
UpperCAmelCase__ = 3_2
if self.block_type == "mid":
init_dict.pop("out_channels" )
UpperCAmelCase__ = self.dummy_input
return init_dict, inputs_dict
def lowercase_ (self : List[str] , __UpperCAmelCase : Any ) -> str:
"""simple docstring"""
UpperCAmelCase__ , UpperCAmelCase__ = self.prepare_init_args_and_inputs_for_common()
UpperCAmelCase__ = self.block_class(**__UpperCAmelCase )
unet_block.to(__UpperCAmelCase )
unet_block.eval()
with torch.no_grad():
UpperCAmelCase__ = unet_block(**__UpperCAmelCase )
if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
UpperCAmelCase__ = output[0]
self.assertEqual(output.shape , self.output_shape )
UpperCAmelCase__ = output[0, -1, -3:, -3:]
UpperCAmelCase__ = torch.tensor(__UpperCAmelCase ).to(__UpperCAmelCase )
assert torch_all_close(output_slice.flatten() , __UpperCAmelCase , atol=5E-3 )
@unittest.skipIf(torch_device == "mps" , "Training is not supported in mps" )
def lowercase_ (self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase__ , UpperCAmelCase__ = self.prepare_init_args_and_inputs_for_common()
UpperCAmelCase__ = self.block_class(**__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.train()
UpperCAmelCase__ = model(**__UpperCAmelCase )
if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
UpperCAmelCase__ = output[0]
UpperCAmelCase__ = torch.device(__UpperCAmelCase )
UpperCAmelCase__ = randn_tensor(output.shape , device=__UpperCAmelCase )
UpperCAmelCase__ = torch.nn.functional.mse_loss(__UpperCAmelCase , __UpperCAmelCase )
loss.backward()
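# --- A standalone sketch of the training smoke test above, with a plain Conv2d
# --- in place of a diffusers block: forward, MSE against random noise,
# --- backward, then confirm gradients reached every parameter.
if __name__ == "__main__":
    block = torch.nn.Conv2d(32, 32, kernel_size=3, padding=1)
    out = block(torch.randn(4, 32, 16, 16))
    torch.nn.functional.mse_loss(out, torch.randn_like(out)).backward()
    assert all(p.grad is not None for p in block.parameters())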
| 65 |
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class _a :
"""simple docstring"""
@property
def __A ( self : Union[str, Any] ):
return self.get_dummy_input()
@property
def __A ( self : int ):
if self.block_type == "down":
return (4, 32, 16, 16)
elif self.block_type == "mid":
return (4, 32, 32, 32)
elif self.block_type == "up":
return (4, 32, 64, 64)
raise ValueError(f'''\'{self.block_type}\' is not a supported block_type. Set it to \'up\', \'mid\', or \'down\'.''' )
def __A ( self : Union[str, Any] , UpperCAmelCase : List[Any]=True , UpperCAmelCase : str=False , UpperCAmelCase : Tuple=False , UpperCAmelCase : Optional[Any]=False , ):
A_ = 4
A_ = 32
A_ = (32, 32)
A_ = torch.manual_seed(0 )
A_ = torch.device(UpperCAmelCase )
A_ = (batch_size, num_channels) + sizes
A_ = randn_tensor(UpperCAmelCase , generator=UpperCAmelCase , device=UpperCAmelCase )
A_ = {"hidden_states": hidden_states}
if include_temb:
A_ = 128
A_ = randn_tensor((batch_size, temb_channels) , generator=UpperCAmelCase , device=UpperCAmelCase )
if include_res_hidden_states_tuple:
A_ = torch.manual_seed(1 )
A_ = (randn_tensor(UpperCAmelCase , generator=UpperCAmelCase , device=UpperCAmelCase ),)
if include_encoder_hidden_states:
A_ = floats_tensor((batch_size, 32, 32) ).to(UpperCAmelCase )
if include_skip_sample:
A_ = randn_tensor(((batch_size, 3) + sizes) , generator=UpperCAmelCase , device=UpperCAmelCase )
return dummy_input
def __A ( self : Optional[int] ):
A_ = {
"in_channels": 32,
"out_channels": 32,
"temb_channels": 128,
}
if self.block_type == "up":
A_ = 32
if self.block_type == "mid":
init_dict.pop("out_channels" )
A_ = self.dummy_input
return init_dict, inputs_dict
def __A ( self : List[str] , UpperCAmelCase : Optional[Any] ):
A_ , A_ = self.prepare_init_args_and_inputs_for_common()
A_ = self.block_class(**UpperCAmelCase )
unet_block.to(UpperCAmelCase )
unet_block.eval()
with torch.no_grad():
A_ = unet_block(**UpperCAmelCase )
if isinstance(UpperCAmelCase , UpperCAmelCase ):
A_ = output[0]
self.assertEqual(output.shape , self.output_shape )
A_ = output[0, -1, -3:, -3:]
A_ = torch.tensor(UpperCAmelCase ).to(UpperCAmelCase )
assert torch_all_close(output_slice.flatten() , UpperCAmelCase , atol=5E-3 )
@unittest.skipIf(torch_device == "mps" , "Training is not supported in mps" )
def __A ( self : Union[str, Any] ):
A_ , A_ = self.prepare_init_args_and_inputs_for_common()
A_ = self.block_class(**UpperCAmelCase )
model.to(UpperCAmelCase )
model.train()
A_ = model(**UpperCAmelCase )
if isinstance(UpperCAmelCase , UpperCAmelCase ):
A_ = output[0]
A_ = torch.device(UpperCAmelCase )
A_ = randn_tensor(output.shape , device=UpperCAmelCase )
A_ = torch.nn.functional.mse_loss(UpperCAmelCase , UpperCAmelCase )
        loss.backward()
| 312 | 0 |
from decimal import Decimal, getcontext
from math import ceil, factorial
def pi (precision :int ) -> str:
    # Chudnovsky algorithm: each term of the series adds roughly 14 digits.
    if not isinstance(precision , int ):
        raise TypeError("Undefined for non-integers" )
    elif precision < 1:
        raise ValueError("Undefined for non-natural numbers" )
    getcontext().prec = precision
    num_iterations = ceil(precision / 14 )
    constant_term = 426_880 * Decimal(10_005 ).sqrt()
    exponential_term = 1
    linear_term = 13_591_409
    partial_sum = Decimal(linear_term )
    for k in range(1 , num_iterations ):
        multinomial_term = factorial(6 * k ) // (factorial(3 * k ) * factorial(k ) ** 3)
        linear_term += 545_140_134
        exponential_term *= -262_537_412_640_768_000
        partial_sum += Decimal(multinomial_term * linear_term ) / exponential_term
    return str(constant_term / partial_sum )[:-1]


if __name__ == "__main__":
    n = 50
    print(F'''The first {n} digits of pi is: {pi(n)}''')
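    # --- A quick hypothetical sanity check against math.pi; the trailing
    # --- digit is dropped by pi(), hence the prefix comparison.
    from math import pi as float_pi

    assert pi(15 ).startswith(str(float_pi )[:10] )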
| 358 |
'''simple docstring'''
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
lowerCamelCase__ = logging.get_logger(__name__)
# General docstring
lowerCamelCase__ = 'RegNetConfig'
# Base docstring
lowerCamelCase__ = 'facebook/regnet-y-040'
lowerCamelCase__ = [1, 1_088, 7, 7]
# Image classification docstring
lowerCamelCase__ = 'facebook/regnet-y-040'
lowerCamelCase__ = 'tabby, tabby cat'
lowerCamelCase__ = [
'facebook/regnet-y-040',
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class lowerCAmelCase__ ( tf.keras.layers.Layer ):
def __init__( self : int , lowerCamelCase__ : int , lowerCamelCase__ : int = 3 , lowerCamelCase__ : int = 1 , lowerCamelCase__ : int = 1 , lowerCamelCase__ : Optional[str] = "relu" , **lowerCamelCase__ : Tuple , ) ->Optional[Any]:
'''simple docstring'''
super().__init__(**lowerCamelCase__ )
# The padding and conv has been verified in
# https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
_UpperCAmelCase : Optional[Any] = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 )
_UpperCAmelCase : Dict = tf.keras.layers.ConvaD(
filters=lowerCamelCase__ , kernel_size=lowerCamelCase__ , strides=lowerCamelCase__ , padding="VALID" , groups=lowerCamelCase__ , use_bias=lowerCamelCase__ , name="convolution" , )
_UpperCAmelCase : List[Any] = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name="normalization" )
_UpperCAmelCase : int = ACTaFN[activation] if activation is not None else tf.identity
def lowerCAmelCase__ ( self : int , lowerCamelCase__ : Tuple ) ->Any:
'''simple docstring'''
_UpperCAmelCase : List[str] = self.convolution(self.padding(lowerCamelCase__ ) )
_UpperCAmelCase : Optional[Any] = self.normalization(lowerCamelCase__ )
_UpperCAmelCase : List[Any] = self.activation(lowerCamelCase__ )
return hidden_state
class lowerCAmelCase__ ( tf.keras.layers.Layer ):
def __init__( self : str , lowerCamelCase__ : RegNetConfig , **lowerCamelCase__ : Optional[Any] ) ->Optional[Any]:
'''simple docstring'''
super().__init__(**lowerCamelCase__ )
_UpperCAmelCase : List[str] = config.num_channels
_UpperCAmelCase : Any = TFRegNetConvLayer(
out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name="embedder" , )
def lowerCAmelCase__ ( self : Any , lowerCamelCase__ : Optional[Any] ) ->Dict:
'''simple docstring'''
_UpperCAmelCase : List[str] = shape_list(lowerCamelCase__ )[1]
if tf.executing_eagerly() and num_channels != self.num_channels:
raise ValueError(
"Make sure that the channel dimension of the pixel values match with the one set in the configuration." )
# When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
# So change the input format from `NCHW` to `NHWC`.
# shape = (batch_size, in_height, in_width, in_channels=num_channels)
_UpperCAmelCase : Optional[Any] = tf.transpose(lowerCamelCase__ , perm=(0, 2, 3, 1) )
_UpperCAmelCase : List[Any] = self.embedder(lowerCamelCase__ )
return hidden_state
class lowerCAmelCase__ ( tf.keras.layers.Layer ):
def __init__( self : int , lowerCamelCase__ : int , lowerCamelCase__ : int = 2 , **lowerCamelCase__ : int ) ->Union[str, Any]:
'''simple docstring'''
super().__init__(**lowerCamelCase__ )
_UpperCAmelCase : int = tf.keras.layers.ConvaD(
filters=lowerCamelCase__ , kernel_size=1 , strides=lowerCamelCase__ , use_bias=lowerCamelCase__ , name="convolution" )
_UpperCAmelCase : Any = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name="normalization" )
def lowerCAmelCase__ ( self : Tuple , lowerCamelCase__ : tf.Tensor , lowerCamelCase__ : bool = False ) ->tf.Tensor:
'''simple docstring'''
return self.normalization(self.convolution(lowerCamelCase__ ) , training=lowerCamelCase__ )
class lowerCAmelCase__ ( tf.keras.layers.Layer ):
def __init__( self : Any , lowerCamelCase__ : int , lowerCamelCase__ : int , **lowerCamelCase__ : Optional[int] ) ->Dict:
'''simple docstring'''
super().__init__(**lowerCamelCase__ )
_UpperCAmelCase : List[str] = tf.keras.layers.GlobalAveragePoolingaD(keepdims=lowerCamelCase__ , name="pooler" )
_UpperCAmelCase : int = [
tf.keras.layers.ConvaD(filters=lowerCamelCase__ , kernel_size=1 , activation="relu" , name="attention.0" ),
tf.keras.layers.ConvaD(filters=lowerCamelCase__ , kernel_size=1 , activation="sigmoid" , name="attention.2" ),
]
def lowerCAmelCase__ ( self : Optional[int] , lowerCamelCase__ : Optional[int] ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = self.pooler(lowerCamelCase__ )
for layer_module in self.attention:
_UpperCAmelCase : str = layer_module(lowerCamelCase__ )
_UpperCAmelCase : Optional[Any] = hidden_state * pooled
return hidden_state
class lowerCAmelCase__ ( tf.keras.layers.Layer ):
def __init__( self : Dict , lowerCamelCase__ : RegNetConfig , lowerCamelCase__ : int , lowerCamelCase__ : int , lowerCamelCase__ : int = 1 , **lowerCamelCase__ : Any ) ->List[str]:
'''simple docstring'''
super().__init__(**lowerCamelCase__ )
_UpperCAmelCase : List[str] = in_channels != out_channels or stride != 1
_UpperCAmelCase : List[str] = max(1 , out_channels // config.groups_width )
_UpperCAmelCase : List[str] = (
TFRegNetShortCut(lowerCamelCase__ , stride=lowerCamelCase__ , name="shortcut" )
if should_apply_shortcut
else tf.keras.layers.Activation("linear" , name="shortcut" )
)
# `self.layers` instead of `self.layer` because that is a reserved argument.
_UpperCAmelCase : Optional[int] = [
TFRegNetConvLayer(lowerCamelCase__ , kernel_size=1 , activation=config.hidden_act , name="layer.0" ),
TFRegNetConvLayer(
lowerCamelCase__ , stride=lowerCamelCase__ , groups=lowerCamelCase__ , activation=config.hidden_act , name="layer.1" ),
TFRegNetConvLayer(lowerCamelCase__ , kernel_size=1 , activation=lowerCamelCase__ , name="layer.2" ),
]
_UpperCAmelCase : Union[str, Any] = ACTaFN[config.hidden_act]
def lowerCAmelCase__ ( self : Tuple , lowerCamelCase__ : Union[str, Any] ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase : Any = hidden_state
for layer_module in self.layers:
_UpperCAmelCase : List[Any] = layer_module(lowerCamelCase__ )
_UpperCAmelCase : Optional[Any] = self.shortcut(lowerCamelCase__ )
hidden_state += residual
_UpperCAmelCase : List[Any] = self.activation(lowerCamelCase__ )
return hidden_state
class lowerCAmelCase__ ( tf.keras.layers.Layer ):
def __init__( self : List[Any] , lowerCamelCase__ : RegNetConfig , lowerCamelCase__ : int , lowerCamelCase__ : int , lowerCamelCase__ : int = 1 , **lowerCamelCase__ : str ) ->Optional[int]:
'''simple docstring'''
super().__init__(**lowerCamelCase__ )
_UpperCAmelCase : Union[str, Any] = in_channels != out_channels or stride != 1
_UpperCAmelCase : Optional[int] = max(1 , out_channels // config.groups_width )
_UpperCAmelCase : Union[str, Any] = (
TFRegNetShortCut(lowerCamelCase__ , stride=lowerCamelCase__ , name="shortcut" )
if should_apply_shortcut
else tf.keras.layers.Activation("linear" , name="shortcut" )
)
_UpperCAmelCase : List[Any] = [
TFRegNetConvLayer(lowerCamelCase__ , kernel_size=1 , activation=config.hidden_act , name="layer.0" ),
TFRegNetConvLayer(
lowerCamelCase__ , stride=lowerCamelCase__ , groups=lowerCamelCase__ , activation=config.hidden_act , name="layer.1" ),
TFRegNetSELayer(lowerCamelCase__ , reduced_channels=int(round(in_channels / 4 ) ) , name="layer.2" ),
TFRegNetConvLayer(lowerCamelCase__ , kernel_size=1 , activation=lowerCamelCase__ , name="layer.3" ),
]
_UpperCAmelCase : int = ACTaFN[config.hidden_act]
def lowerCAmelCase__ ( self : Any , lowerCamelCase__ : str ) ->Any:
'''simple docstring'''
_UpperCAmelCase : int = hidden_state
for layer_module in self.layers:
_UpperCAmelCase : Tuple = layer_module(lowerCamelCase__ )
_UpperCAmelCase : List[Any] = self.shortcut(lowerCamelCase__ )
hidden_state += residual
_UpperCAmelCase : Tuple = self.activation(lowerCamelCase__ )
return hidden_state
class lowerCAmelCase__ ( tf.keras.layers.Layer ):
def __init__( self : str , lowerCamelCase__ : RegNetConfig , lowerCamelCase__ : int , lowerCamelCase__ : int , lowerCamelCase__ : int = 2 , lowerCamelCase__ : int = 2 , **lowerCamelCase__ : Union[str, Any] ) ->Optional[int]:
'''simple docstring'''
super().__init__(**lowerCamelCase__ )
_UpperCAmelCase : str = TFRegNetXLayer if config.layer_type == "x" else TFRegNetYLayer
_UpperCAmelCase : List[str] = [
# downsampling is done in the first layer with stride of 2
layer(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , stride=lowerCamelCase__ , name="layers.0" ),
*[layer(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , name=F"""layers.{i+1}""" ) for i in range(depth - 1 )],
]
def lowerCAmelCase__ ( self : Optional[int] , lowerCamelCase__ : List[str] ) ->List[str]:
'''simple docstring'''
for layer_module in self.layers:
_UpperCAmelCase : Optional[int] = layer_module(lowerCamelCase__ )
return hidden_state
class lowerCAmelCase__ ( tf.keras.layers.Layer ):
def __init__( self : Dict , lowerCamelCase__ : RegNetConfig , **lowerCamelCase__ : int ) ->Dict:
'''simple docstring'''
super().__init__(**lowerCamelCase__ )
_UpperCAmelCase : Union[str, Any] = []
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
TFRegNetStage(
lowerCamelCase__ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name="stages.0" , ) )
_UpperCAmelCase : Dict = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for i, ((in_channels, out_channels), depth) in enumerate(zip(lowerCamelCase__ , config.depths[1:] ) ):
self.stages.append(TFRegNetStage(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , depth=lowerCamelCase__ , name=F"""stages.{i+1}""" ) )
def lowerCAmelCase__ ( self : str , lowerCamelCase__ : tf.Tensor , lowerCamelCase__ : bool = False , lowerCamelCase__ : bool = True ) ->TFBaseModelOutputWithNoAttention:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
_UpperCAmelCase : Optional[Any] = hidden_states + (hidden_state,)
_UpperCAmelCase : Dict = stage_module(lowerCamelCase__ )
if output_hidden_states:
_UpperCAmelCase : Tuple = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return TFBaseModelOutputWithNoAttention(last_hidden_state=lowerCamelCase__ , hidden_states=lowerCamelCase__ )
@keras_serializable
class lowerCAmelCase__ ( tf.keras.layers.Layer ):
lowerCAmelCase : Optional[Any] = RegNetConfig
def __init__( self : Union[str, Any] , lowerCamelCase__ : Any , **lowerCamelCase__ : str ) ->int:
'''simple docstring'''
super().__init__(**lowerCamelCase__ )
_UpperCAmelCase : Union[str, Any] = config
_UpperCAmelCase : Union[str, Any] = TFRegNetEmbeddings(lowerCamelCase__ , name="embedder" )
_UpperCAmelCase : Union[str, Any] = TFRegNetEncoder(lowerCamelCase__ , name="encoder" )
_UpperCAmelCase : Union[str, Any] = tf.keras.layers.GlobalAveragePoolingaD(keepdims=lowerCamelCase__ , name="pooler" )
@unpack_inputs
def lowerCAmelCase__ ( self : Any , lowerCamelCase__ : tf.Tensor , lowerCamelCase__ : Optional[bool] = None , lowerCamelCase__ : Optional[bool] = None , lowerCamelCase__ : bool = False , ) ->TFBaseModelOutputWithPoolingAndNoAttention:
'''simple docstring'''
_UpperCAmelCase : Tuple = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_UpperCAmelCase : List[str] = return_dict if return_dict is not None else self.config.use_return_dict
_UpperCAmelCase : Union[str, Any] = self.embedder(lowerCamelCase__ , training=lowerCamelCase__ )
_UpperCAmelCase : str = self.encoder(
lowerCamelCase__ , output_hidden_states=lowerCamelCase__ , return_dict=lowerCamelCase__ , training=lowerCamelCase__ )
_UpperCAmelCase : Dict = encoder_outputs[0]
_UpperCAmelCase : Dict = self.pooler(lowerCamelCase__ )
# Change to NCHW output format have uniformity in the modules
_UpperCAmelCase : Union[str, Any] = tf.transpose(lowerCamelCase__ , perm=(0, 3, 1, 2) )
_UpperCAmelCase : Tuple = tf.transpose(lowerCamelCase__ , perm=(0, 3, 1, 2) )
# Change the other hidden state outputs to NCHW as well
if output_hidden_states:
_UpperCAmelCase : List[str] = tuple([tf.transpose(lowerCamelCase__ , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=lowerCamelCase__ , pooler_output=lowerCamelCase__ , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , )
class lowerCAmelCase__ ( UpperCAmelCase__ ):
lowerCAmelCase : Tuple = RegNetConfig
lowerCAmelCase : Tuple = "regnet"
lowerCAmelCase : Union[str, Any] = "pixel_values"
@property
def lowerCAmelCase__ ( self : Optional[Any] ) ->Optional[int]:
'''simple docstring'''
return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 2_24, 2_24) , dtype=tf.floataa )}
lowerCamelCase__ = r'\n Parameters:\n This model is a Tensorflow\n [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a\n regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and\n behavior.\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.\n'
lowerCamelCase__ = r'\n Args:\n pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConveNextImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
"The bare RegNet model outputting raw features without any specific head on top." , UpperCAmelCase__ , )
class lowerCAmelCase__ ( UpperCAmelCase__ ):
def __init__( self : Any , lowerCamelCase__ : RegNetConfig , *lowerCamelCase__ : Any , **lowerCamelCase__ : List[str] ) ->Optional[int]:
'''simple docstring'''
super().__init__(lowerCamelCase__ , *lowerCamelCase__ , **lowerCamelCase__ )
_UpperCAmelCase : Optional[Any] = TFRegNetMainLayer(lowerCamelCase__ , name="regnet" )
@unpack_inputs
@add_start_docstrings_to_model_forward(lowerCamelCase__ )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=lowerCamelCase__ , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def lowerCAmelCase__ ( self : str , lowerCamelCase__ : tf.Tensor , lowerCamelCase__ : Optional[bool] = None , lowerCamelCase__ : Optional[bool] = None , lowerCamelCase__ : Any=False , ) ->Union[TFBaseModelOutputWithPoolingAndNoAttention, Tuple[tf.Tensor]]:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_UpperCAmelCase : Optional[Any] = return_dict if return_dict is not None else self.config.use_return_dict
_UpperCAmelCase : Union[str, Any] = self.regnet(
pixel_values=lowerCamelCase__ , output_hidden_states=lowerCamelCase__ , return_dict=lowerCamelCase__ , training=lowerCamelCase__ , )
if not return_dict:
return (outputs[0],) + outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , )
@add_start_docstrings(
"\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n " , UpperCAmelCase__ , )
class lowerCAmelCase__ ( UpperCAmelCase__ , UpperCAmelCase__ ):
def __init__( self : str , lowerCamelCase__ : RegNetConfig , *lowerCamelCase__ : List[Any] , **lowerCamelCase__ : Union[str, Any] ) ->Any:
'''simple docstring'''
super().__init__(lowerCamelCase__ , *lowerCamelCase__ , **lowerCamelCase__ )
_UpperCAmelCase : Optional[int] = config.num_labels
_UpperCAmelCase : Dict = TFRegNetMainLayer(lowerCamelCase__ , name="regnet" )
# classification head
_UpperCAmelCase : str = [
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(config.num_labels , name="classifier.1" ) if config.num_labels > 0 else tf.identity,
]
@unpack_inputs
@add_start_docstrings_to_model_forward(lowerCamelCase__ )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=lowerCamelCase__ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def lowerCAmelCase__ ( self : str , lowerCamelCase__ : tf.Tensor = None , lowerCamelCase__ : tf.Tensor = None , lowerCamelCase__ : bool = None , lowerCamelCase__ : bool = None , lowerCamelCase__ : Dict=False , ) ->Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
'''simple docstring'''
_UpperCAmelCase : str = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_UpperCAmelCase : str = return_dict if return_dict is not None else self.config.use_return_dict
_UpperCAmelCase : Union[str, Any] = self.regnet(
lowerCamelCase__ , output_hidden_states=lowerCamelCase__ , return_dict=lowerCamelCase__ , training=lowerCamelCase__ )
_UpperCAmelCase : int = outputs.pooler_output if return_dict else outputs[1]
_UpperCAmelCase : Dict = self.classifier[0](lowerCamelCase__ )
_UpperCAmelCase : str = self.classifier[1](lowerCamelCase__ )
_UpperCAmelCase : Tuple = None if labels is None else self.hf_compute_loss(labels=lowerCamelCase__ , logits=lowerCamelCase__ )
if not return_dict:
_UpperCAmelCase : int = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(loss=lowerCamelCase__ , logits=lowerCamelCase__ , hidden_states=outputs.hidden_states )
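# --- A minimal sketch of the NCHW <-> NHWC handling used throughout the model
# --- above: tf.keras Conv2D expects channels-last on CPU, so inputs are
# --- transposed in and outputs transposed back. All shapes are illustrative.
if __name__ == "__main__":
    pixel_values = tf.random.normal((1, 3, 224, 224))      # NCHW, as the model API expects
    nhwc = tf.transpose(pixel_values, perm=(0, 2, 3, 1))   # -> NHWC for Keras Conv2D
    features = tf.keras.layers.Conv2D(filters=32, kernel_size=3, strides=2, padding="same")(nhwc)
    nchw = tf.transpose(features, perm=(0, 3, 1, 2))       # back to NCHW for output uniformity
    assert nchw.shape == (1, 32, 112, 112)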
| 322 | 0 |
"""simple docstring"""
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand
def info_command_factory( args ):
'''simple docstring'''
return EnvironmentCommand()
class EnvironmentCommand( BaseDiffusersCLICommand ):
    @staticmethod
    def register_subcommand( parser : ArgumentParser ) -> None:
        download_parser = parser.add_parser("""env""" )
        download_parser.set_defaults(func=info_command_factory )
    def run( self ) -> dict:
        hub_version = huggingface_hub.__version__
        pt_version = """not installed"""
        pt_cuda_available = """NA"""
        if is_torch_available():
            import torch

            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()
        transformers_version = """not installed"""
        if is_transformers_available():
            import transformers

            transformers_version = transformers.__version__
        accelerate_version = """not installed"""
        if is_accelerate_available():
            import accelerate

            accelerate_version = accelerate.__version__
        xformers_version = """not installed"""
        if is_xformers_available():
            import xformers

            xformers_version = xformers.__version__
        info = {
            """`diffusers` version""": version,
            """Platform""": platform.platform(),
            """Python version""": platform.python_version(),
            """PyTorch version (GPU?)""": f"""{pt_version} ({pt_cuda_available})""",
            """Huggingface_hub version""": hub_version,
            """Transformers version""": transformers_version,
            """Accelerate version""": accelerate_version,
            """xFormers version""": xformers_version,
            """Using GPU in script?""": """<fill in>""",
            """Using distributed or parallel set-up in script?""": """<fill in>""",
        }
        print("""\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n""" )
        print(self.format_dict(info ) )
return info
@staticmethod
    def format_dict( d : dict ) -> str:
        return "\n".join([f"""- {prop}: {val}""" for prop, val in d.items()] ) + "\n"
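# --- A minimal, library-agnostic sketch of the optional-dependency probing the
# --- command above performs; "torch" is just an example package name.
if __name__ == "__main__":
    import importlib
    import importlib.util

    def version_or_not_installed(package):
        if importlib.util.find_spec(package) is None:
            return "not installed"
        return getattr(importlib.import_module(package), "__version__", "unknown")

    print(version_or_not_installed("torch"))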
| 294 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'configuration_perceiver': ['PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PerceiverConfig', 'PerceiverOnnxConfig'],
'tokenization_perceiver': ['PerceiverTokenizer'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_perceiver'] = ['PerceiverFeatureExtractor']
    _import_structure['image_processing_perceiver'] = ['PerceiverImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_perceiver'] = [
'PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST',
'PerceiverForImageClassificationConvProcessing',
'PerceiverForImageClassificationFourier',
'PerceiverForImageClassificationLearned',
'PerceiverForMaskedLM',
'PerceiverForMultimodalAutoencoding',
'PerceiverForOpticalFlow',
'PerceiverForSequenceClassification',
'PerceiverLayer',
'PerceiverModel',
'PerceiverPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
from .tokenization_perceiver import PerceiverTokenizer
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_perceiver import PerceiverFeatureExtractor
from .image_processing_perceiver import PerceiverImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_perceiver import (
PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
PerceiverForImageClassificationConvProcessing,
PerceiverForImageClassificationFourier,
PerceiverForImageClassificationLearned,
PerceiverForMaskedLM,
PerceiverForMultimodalAutoencoding,
PerceiverForOpticalFlow,
PerceiverForSequenceClassification,
PerceiverLayer,
PerceiverModel,
PerceiverPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
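# --- A minimal sketch of the optional-backend guard above in isolation, with a
# --- plain ImportError in place of OptionalDependencyNotAvailable; the names
# --- below are hypothetical.
_optional_structure = {"configuration": ["Config"]}
try:
    import torch  # noqa: F401
except ImportError:
    pass
else:
    _optional_structure["modeling"] = ["Model", "PreTrainedModel"]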
| 294 | 1 |
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
UpperCAmelCase__ : Union[str, Any] =logging.getLogger(__name__)
def simple_accuracy( preds , labels ) -> float:
return (preds == labels).mean()
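# --- A quick offline check of the argmax + simple_accuracy pipeline that
# --- compute_metrics wires up below, using fabricated logits.
if __name__ == "__main__":
    fake_logits = np.array([[0.1, 0.9], [0.8, 0.2], [0.3, 0.7]])
    fake_preds = np.argmax(fake_logits, axis=1)  # -> [1, 0, 1]
    assert simple_accuracy(fake_preds, np.array([1, 0, 0])) == 2 / 3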
@dataclass
class __lowercase :
__A = field(
metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
__A = field(
default=a , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
__A = field(
default=a , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
__A = field(
default=a , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
@dataclass
class __lowercase :
__A = field(metadata={"""help""": """The name of the task to train on: """ + """, """.join(processors.keys() )} )
__A = field(metadata={"""help""": """Should contain the data files for the task."""} )
__A = field(
default=1_28 , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
__A = field(
default=a , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
def main( ) -> None:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
lowerCamelCase =HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
lowerCamelCase , lowerCamelCase , lowerCamelCase =parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
""" --overwrite_output_dir to overcome.""" )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"""Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("""Training/evaluation parameters %s""" , _UpperCAmelCase )
# Set seed
set_seed(training_args.seed )
try:
lowerCamelCase =processors[data_args.task_name]()
lowerCamelCase =processor.get_labels()
lowerCamelCase =len(_UpperCAmelCase )
except KeyError:
raise ValueError("""Task not found: %s""" % (data_args.task_name) )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowerCamelCase =AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=_UpperCAmelCase , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , )
lowerCamelCase =AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
lowerCamelCase =AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=_UpperCAmelCase , cache_dir=model_args.cache_dir , )
# Get datasets
lowerCamelCase =(
MultipleChoiceDataset(
data_dir=data_args.data_dir , tokenizer=_UpperCAmelCase , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
if training_args.do_train
else None
)
lowerCamelCase =(
MultipleChoiceDataset(
data_dir=data_args.data_dir , tokenizer=_UpperCAmelCase , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
if training_args.do_eval
else None
)
def compute_metrics(_UpperCAmelCase ) -> Dict:
lowerCamelCase =np.argmax(p.predictions , axis=1 )
return {"acc": simple_accuracy(_UpperCAmelCase , p.label_ids )}
# Data collator
lowerCamelCase =DataCollatorWithPadding(_UpperCAmelCase , pad_to_multiple_of=8 ) if training_args.fpaa else None
# Initialize our Trainer
lowerCamelCase =Trainer(
model=_UpperCAmelCase , args=_UpperCAmelCase , train_dataset=_UpperCAmelCase , eval_dataset=_UpperCAmelCase , compute_metrics=_UpperCAmelCase , data_collator=_UpperCAmelCase , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
lowerCamelCase ={}
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
lowerCamelCase =trainer.evaluate()
lowerCamelCase =os.path.join(training_args.output_dir , """eval_results.txt""" )
if trainer.is_world_master():
with open(_UpperCAmelCase , """w""" ) as writer:
logger.info("""***** Eval results *****""" )
for key, value in result.items():
logger.info(""" %s = %s""" , _UpperCAmelCase , _UpperCAmelCase )
writer.write("""%s = %s\n""" % (key, value) )
results.update(_UpperCAmelCase )
return results
def _mp_fn( index ) -> None:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 371 |
from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError
import requests
def get_openlibrary_data( olid : str = "isbn/0140328726" ) -> dict:
    new_olid = olid.strip().strip("""/""" )  # Remove leading/trailing whitespace & slashes
    if new_olid.count("""/""" ) != 1:
        msg = F"""{olid} is not a valid Open Library olid"""
        raise ValueError(msg )
return requests.get(F"""https://openlibrary.org/{new_olid}.json""" ).json()
def summarize_book( ol_book_data : dict ) -> dict:
    desired_keys = {
        """title""": """Title""",
        """publish_date""": """Publish date""",
        """authors""": """Authors""",
        """number_of_pages""": """Number of pages:""",
        """first_sentence""": """First sentence""",
        """isbn_10""": """ISBN (10)""",
        """isbn_13""": """ISBN (13)""",
    }
    data = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
    data["""Authors"""] = [
        get_openlibrary_data(author["""key"""] )["""name"""] for author in data["""Authors"""]
    ]
    data["""First sentence"""] = data["""First sentence"""]["""value"""]
    for key, value in data.items():
        if isinstance(value , list ):
            data[key] = """, """.join(value )
return data
if __name__ == "__main__":
import doctest
doctest.testmod()
while True:
UpperCAmelCase__ : List[str] =input('''\nEnter the ISBN code to search (or \'quit\' to stop): ''').strip()
if isbn.lower() in ("", "q", "quit", "exit", "stop"):
break
if len(isbn) not in (10, 13) or not isbn.isdigit():
print(F"Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.")
continue
print(F"\nSearching Open Library for ISBN: {isbn}...\n")
try:
UpperCAmelCase__ : Dict =summarize_book(get_openlibrary_data(F"isbn/{isbn}"))
print('''\n'''.join(F"{key}: {value}" for key, value in book_summary.items()))
except JSONDecodeError: # Workaround for requests.exceptions.RequestException:
print(F"Sorry, there are no results for ISBN: {isbn}.")
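    # --- An offline check of the olid validation in get_openlibrary_data
    # --- above: malformed olids must be rejected before any network request.
    for bad_olid in ("isbn", "isbn//0140328726"):
        try:
            get_openlibrary_data(bad_olid )
        except ValueError:
            pass
        else:
            raise AssertionError(f"{bad_olid} should have been rejected" )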
| 262 | 0 |
'''simple docstring'''
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def unwrap_schedule( scheduler , num_steps=10 ):
    '''simple docstring'''
    lrs = []
    for _ in range(num_steps ):
        lrs.append(scheduler.get_lr()[0] )
        scheduler.step()
    return lrs
def unwrap_and_save_reload_schedule( scheduler , num_steps=10 ):
    '''simple docstring'''
    lrs = []
    for step in range(num_steps ):
        lrs.append(scheduler.get_lr()[0] )
        scheduler.step()
        if step == num_steps // 2:
            with tempfile.TemporaryDirectory() as tmpdirname:
                file_name = os.path.join(tmpdirname , """schedule.bin""" )
                torch.save(scheduler.state_dict() , file_name )
                state_dict = torch.load(file_name )
                scheduler.load_state_dict(state_dict )
    return lrs
@require_torch
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
def a ( self : int , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : List[str] ) -> Dict:
self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , len(SCREAMING_SNAKE_CASE__ ) )
for a, b in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
self.assertAlmostEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , delta=SCREAMING_SNAKE_CASE__ )
def a ( self : int ) -> List[Any]:
__lowerCAmelCase = torch.tensor([0.1, -0.2, -0.1] , requires_grad=SCREAMING_SNAKE_CASE__ )
__lowerCAmelCase = torch.tensor([0.4, 0.2, -0.5] )
__lowerCAmelCase = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
__lowerCAmelCase = AdamW(params=[w] , lr=2e-1 , weight_decay=0.0 )
for _ in range(1_00 ):
__lowerCAmelCase = criterion(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
loss.backward()
optimizer.step()
w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 )
def a ( self : Optional[Any] ) -> int:
__lowerCAmelCase = torch.tensor([0.1, -0.2, -0.1] , requires_grad=SCREAMING_SNAKE_CASE__ )
__lowerCAmelCase = torch.tensor([0.4, 0.2, -0.5] )
__lowerCAmelCase = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
__lowerCAmelCase = Adafactor(
params=[w] , lr=1e-2 , eps=(1e-3_0, 1e-3) , clip_threshold=1.0 , decay_rate=-0.8 , betaa=SCREAMING_SNAKE_CASE__ , weight_decay=0.0 , relative_step=SCREAMING_SNAKE_CASE__ , scale_parameter=SCREAMING_SNAKE_CASE__ , warmup_init=SCREAMING_SNAKE_CASE__ , )
for _ in range(10_00 ):
__lowerCAmelCase = criterion(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
loss.backward()
optimizer.step()
w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 )
@require_torch
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE : List[str] = nn.Linear(50 , 50 ) if is_torch_available() else None
_SCREAMING_SNAKE_CASE : Tuple = AdamW(m.parameters() , lr=10.0 ) if is_torch_available() else None
_SCREAMING_SNAKE_CASE : int = 10
def a ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[str]=None ) -> List[Any]:
self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , len(SCREAMING_SNAKE_CASE__ ) )
for a, b in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
self.assertAlmostEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , delta=SCREAMING_SNAKE_CASE__ , msg=SCREAMING_SNAKE_CASE__ )
def a ( self : List[Any] ) -> str:
__lowerCAmelCase = {"""num_warmup_steps""": 2, """num_training_steps""": 10}
# schedulers doct format
# function: (sched_args_dict, expected_learning_rates)
__lowerCAmelCase = {
get_constant_schedule: ({}, [1_0.0] * self.num_steps),
get_constant_schedule_with_warmup: (
{"""num_warmup_steps""": 4},
[0.0, 2.5, 5.0, 7.5, 1_0.0, 1_0.0, 1_0.0, 1_0.0, 1_0.0, 1_0.0],
),
get_linear_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 1_0.0, 8.7_5, 7.5, 6.2_5, 5.0, 3.7_5, 2.5, 1.2_5],
),
get_cosine_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 1_0.0, 9.6_1, 8.5_3, 6.9_1, 5.0, 3.0_8, 1.4_6, 0.3_8],
),
get_cosine_with_hard_restarts_schedule_with_warmup: (
{**common_kwargs, """num_cycles""": 2},
[0.0, 5.0, 1_0.0, 8.5_3, 5.0, 1.4_6, 1_0.0, 8.5_3, 5.0, 1.4_6],
),
get_polynomial_decay_schedule_with_warmup: (
{**common_kwargs, """power""": 2.0, """lr_end""": 1e-7},
[0.0, 5.0, 1_0.0, 7.6_5_6, 5.6_2_5, 3.9_0_6, 2.5, 1.4_0_6, 0.6_2_5, 0.1_5_6],
),
get_inverse_sqrt_schedule: (
{"""num_warmup_steps""": 2},
[0.0, 5.0, 1_0.0, 8.1_6_5, 7.0_7_1, 6.3_2_5, 5.7_7_4, 5.3_4_5, 5.0, 4.7_1_4],
),
}
for scheduler_func, data in scheds.items():
__lowerCAmelCase , __lowerCAmelCase = data
__lowerCAmelCase = scheduler_func(self.optimizer , **SCREAMING_SNAKE_CASE__ )
self.assertEqual(len([scheduler.get_lr()[0]] ) , 1 )
__lowerCAmelCase = unwrap_schedule(SCREAMING_SNAKE_CASE__ , self.num_steps )
self.assertListAlmostEqual(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , tol=1e-2 , msg=f"""failed for {scheduler_func} in normal scheduler""" , )
__lowerCAmelCase = scheduler_func(self.optimizer , **SCREAMING_SNAKE_CASE__ )
if scheduler_func.__name__ != "get_constant_schedule":
LambdaScheduleWrapper.wrap_scheduler(SCREAMING_SNAKE_CASE__ ) # wrap to test picklability of the schedule
__lowerCAmelCase = unwrap_and_save_reload_schedule(SCREAMING_SNAKE_CASE__ , self.num_steps )
self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , msg=f"""failed for {scheduler_func} in save and reload""" )
class LambdaScheduleWrapper :
'''simple docstring'''
    def __init__( self , fn ) -> None:
        self.fn = fn
def __call__( self : Any , *SCREAMING_SNAKE_CASE__ : List[Any] , **SCREAMING_SNAKE_CASE__ : List[str] ) -> Tuple:
return self.fn(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
@classmethod
def a ( self : List[str] , SCREAMING_SNAKE_CASE__ : str ) -> Union[str, Any]:
__lowerCAmelCase = list(map(self , scheduler.lr_lambdas ) )
| 229 | '''simple docstring'''
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxCrossAttnUpBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
FlaxUpBlockaD,
)
@flax.struct.dataclass
class FlaxUNetaDConditionOutput(BaseOutput ):
    """Output of the conditional UNet: the denoised `sample` tensor."""

    sample: jnp.ndarray
@flax_register_to_config
class FlaxUNetaDConditionModel(nn.Module , FlaxModelMixin , ConfigMixin ):
    """Conditional 2D UNet in Flax; configuration fields and defaults below."""
    sample_size: int = 32
    in_channels: int = 4
    out_channels: int = 4
    down_block_types: Tuple[str] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    up_block_types: Tuple[str] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")
    only_cross_attention: Union[bool, Tuple[bool]] = False
    block_out_channels: Tuple[int] = (320, 640, 1280, 1280)
    layers_per_block: int = 2
    attention_head_dim: Union[int, Tuple[int]] = 8
    num_attention_heads: Optional[Union[int, Tuple[int]]] = None
    cross_attention_dim: int = 1280
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: jnp.dtype = jnp.float32
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    use_memory_efficient_attention: bool = False
    def init_weights( self , rng: jax.random.KeyArray ) -> FrozenDict:
        # init input tensors
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape , dtype=jnp.float32 )
        timesteps = jnp.ones((1,) , dtype=jnp.int32 )
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.float32 )
        params_rng, dropout_rng = jax.random.split(rng )
        rngs = {"""params""": params_rng, """dropout""": dropout_rng}
        return self.init(rngs , sample , timesteps , encoder_hidden_states )["params"]
    def setup( self ):
        block_out_channels = self.block_out_channels
        time_embed_dim = block_out_channels[0] * 4
if self.num_attention_heads is not None:
raise ValueError(
"""At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19.""" )
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
        num_attention_heads = self.num_attention_heads or self.attention_head_dim
        # input
        self.conv_in = nn.Conv(
            block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
        # time
        self.time_proj = FlaxTimesteps(
            block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
        self.time_embedding = FlaxTimestepEmbedding(time_embed_dim , dtype=self.dtype )
        only_cross_attention = self.only_cross_attention
        if isinstance(only_cross_attention , bool ):
            only_cross_attention = (only_cross_attention,) * len(self.down_block_types )
        if isinstance(num_attention_heads , int ):
            num_attention_heads = (num_attention_heads,) * len(self.down_block_types )
# down
        down_blocks = []
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(self.down_block_types ):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels ) - 1
            if down_block_type == "CrossAttnDownBlock2D":
                down_block = FlaxCrossAttnDownBlockaD(
                    in_channels=input_channel , out_channels=output_channel , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
            else:
                down_block = FlaxDownBlockaD(
                    in_channels=input_channel , out_channels=output_channel , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
            down_blocks.append(down_block )
        self.down_blocks = down_blocks
# mid
        self.mid_block = FlaxUNetMidBlockaDCrossAttn(
            in_channels=block_out_channels[-1] , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
# up
        up_blocks = []
        reversed_block_out_channels = list(reversed(block_out_channels ) )
        reversed_num_attention_heads = list(reversed(num_attention_heads ) )
        only_cross_attention = list(reversed(only_cross_attention ) )
        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(self.up_block_types ):
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]
            input_channel = reversed_block_out_channels[min(i + 1 , len(block_out_channels ) - 1 )]
            is_final_block = i == len(block_out_channels ) - 1
            if up_block_type == "CrossAttnUpBlock2D":
                up_block = FlaxCrossAttnUpBlockaD(
                    in_channels=input_channel , out_channels=output_channel , prev_output_channel=prev_output_channel , num_layers=self.layers_per_block + 1 , num_attention_heads=reversed_num_attention_heads[i] , add_upsample=not is_final_block , dropout=self.dropout , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
            else:
                up_block = FlaxUpBlockaD(
                    in_channels=input_channel , out_channels=output_channel , prev_output_channel=prev_output_channel , num_layers=self.layers_per_block + 1 , add_upsample=not is_final_block , dropout=self.dropout , dtype=self.dtype , )
            up_blocks.append(up_block )
            prev_output_channel = output_channel
        self.up_blocks = up_blocks
# out
        self.conv_norm_out = nn.GroupNorm(num_groups=32 , epsilon=1e-5 )
        self.conv_out = nn.Conv(
            self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
    def __call__( self , sample , timesteps , encoder_hidden_states , down_block_additional_residuals=None , mid_block_additional_residual=None , return_dict: bool = True , train: bool = False , ) -> Union[FlaxUNetaDConditionOutput, Tuple]:
# 1. time
        if not isinstance(timesteps , jnp.ndarray ):
            timesteps = jnp.array([timesteps] , dtype=jnp.int32 )
        elif isinstance(timesteps , jnp.ndarray ) and len(timesteps.shape ) == 0:
            timesteps = timesteps.astype(dtype=jnp.float32 )
            timesteps = jnp.expand_dims(timesteps , 0 )
        t_emb = self.time_proj(timesteps )
        t_emb = self.time_embedding(t_emb )
        # 2. pre-process
        sample = jnp.transpose(sample , (0, 2, 3, 1) )
        sample = self.conv_in(sample )
# 3. down
        down_block_res_samples = (sample,)
for down_block in self.down_blocks:
            if isinstance(down_block , FlaxCrossAttnDownBlockaD ):
                sample, res_samples = down_block(sample , t_emb , encoder_hidden_states , deterministic=not train )
            else:
                sample, res_samples = down_block(sample , t_emb , deterministic=not train )
            down_block_res_samples += res_samples
        if down_block_additional_residuals is not None:
            new_down_block_res_samples = ()
            for down_block_res_sample, down_block_additional_residual in zip(
                down_block_res_samples , down_block_additional_residuals ):
                down_block_res_sample += down_block_additional_residual
                new_down_block_res_samples += (down_block_res_sample,)
            down_block_res_samples = new_down_block_res_samples
# 4. mid
        sample = self.mid_block(sample , t_emb , encoder_hidden_states , deterministic=not train )
if mid_block_additional_residual is not None:
sample += mid_block_additional_residual
# 5. up
for up_block in self.up_blocks:
            res_samples = down_block_res_samples[-(self.layers_per_block + 1) :]
            down_block_res_samples = down_block_res_samples[: -(self.layers_per_block + 1)]
            if isinstance(up_block , FlaxCrossAttnUpBlockaD ):
                sample = up_block(
                    sample , temb=t_emb , encoder_hidden_states=encoder_hidden_states , res_hidden_states_tuple=res_samples , deterministic=not train , )
            else:
                sample = up_block(sample , temb=t_emb , res_hidden_states_tuple=res_samples , deterministic=not train )
# 6. post-process
        sample = self.conv_norm_out(sample )
        sample = nn.silu(sample )
        sample = self.conv_out(sample )
        sample = jnp.transpose(sample , (0, 3, 1, 2) )
if not return_dict:
return (sample,)
        return FlaxUNetaDConditionOutput(sample=sample )
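# Minimal usage sketch (illustrative only; shapes follow the default fields above):
#     model = FlaxUNetaDConditionModel()
#     params = model.init_weights(jax.random.PRNGKey(0))
#     sample = jnp.zeros((1, 4, 32, 32), dtype=jnp.float32)
#     out = model.apply({"params": params}, sample, jnp.ones((1,), dtype=jnp.int32),
#                       jnp.zeros((1, 1, 1280), dtype=jnp.float32))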
| 229 | 1 |
"""simple docstring"""
from __future__ import annotations
from collections import namedtuple
def electric_power( voltage: float , current: float , power: float ) -> tuple:
    """Solve for whichever of voltage, current or power is given as 0 (P = V * I)."""
    result = namedtuple('''result''' , '''name value''' )
if (voltage, current, power).count(0 ) != 1:
raise ValueError('''Only one argument must be 0''' )
elif power < 0:
raise ValueError(
'''Power cannot be negative in any electrical/electronics system''' )
elif voltage == 0:
return result('''voltage''' , power / current )
elif current == 0:
return result('''current''' , power / voltage )
elif power == 0:
return result('''power''' , float(round(abs(voltage * current ) , 2 ) ) )
else:
raise ValueError('''Exactly one argument must be 0''' )
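# Worked example (illustrative): with voltage = 0, current = 2 and power = 4,
# the missing quantity is V = P / I = 2.0, i.e. result(name='voltage', value=2.0).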
if __name__ == "__main__":
import doctest
doctest.testmod()
| 302 |
"""simple docstring"""
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
lowerCAmelCase_ = {
'facebook/maskformer-swin-base-ade': (
'https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json'
)
# See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}
lowerCAmelCase_ = logging.get_logger(__name__)
class MaskFormerConfig(PretrainedConfig ):
    model_type = '''maskformer'''
    attribute_map = {'''hidden_size''': '''mask_feature_size'''}
    backbones_supported = ['''resnet''', '''swin''']
    decoders_supported = ['''detr''']
    def __init__( self , fpn_feature_size: int = 256 , mask_feature_size: int = 256 , no_object_weight: float = 0.1 , use_auxiliary_loss: bool = False , backbone_config: Optional[Dict] = None , decoder_config: Optional[Dict] = None , init_std: float = 0.02 , init_xavier_std: float = 1.0 , dice_weight: float = 1.0 , cross_entropy_weight: float = 1.0 , mask_weight: float = 20.0 , output_auxiliary_logits: Optional[bool] = None , **kwargs , ) -> None:
if backbone_config is None:
# fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
            backbone_config = SwinConfig(
                image_size=384 ,in_channels=3 ,patch_size=4 ,embed_dim=128 ,depths=[2, 2, 18, 2] ,num_heads=[4, 8, 16, 32] ,window_size=12 ,drop_path_rate=0.3 ,out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] ,)
        if isinstance(backbone_config ,dict ):
            backbone_model_type = backbone_config.pop('''model_type''' )
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config )
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
f"""Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. """
f"""Supported model types: {",".join(self.backbones_supported )}""" )
if decoder_config is None:
# fall back to https://huggingface.co/facebook/detr-resnet-50
            decoder_config = DetrConfig()
else:
# verify that the decoder is supported
            decoder_type = (
                decoder_config.pop('''model_type''' ) if isinstance(decoder_config ,dict ) else decoder_config.model_type
            )
if decoder_type not in self.decoders_supported:
raise ValueError(
f"""Transformer Decoder {decoder_type} not supported, please use one of"""
f""" {",".join(self.decoders_supported )}""" )
            if isinstance(decoder_config ,dict ):
                config_class = CONFIG_MAPPING[decoder_type]
                decoder_config = config_class.from_dict(decoder_config )
        self.backbone_config = backbone_config
        self.decoder_config = decoder_config
        # main feature dimension for the model
        self.fpn_feature_size = fpn_feature_size
        self.mask_feature_size = mask_feature_size
        # initializer
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        # Hungarian matcher && loss
        self.cross_entropy_weight = cross_entropy_weight
        self.dice_weight = dice_weight
        self.mask_weight = mask_weight
        self.use_auxiliary_loss = use_auxiliary_loss
        self.no_object_weight = no_object_weight
        self.output_auxiliary_logits = output_auxiliary_logits
        self.num_attention_heads = self.decoder_config.encoder_attention_heads
        self.num_hidden_layers = self.decoder_config.num_hidden_layers
        super().__init__(**kwargs )
    @classmethod
    def from_backbone_and_decoder_configs( cls ,backbone_config: PretrainedConfig ,decoder_config: PretrainedConfig ,**kwargs ):
        """Instantiate a config from a backbone config and a DETR decoder config."""
        return cls(
            backbone_config=backbone_config ,decoder_config=decoder_config ,**kwargs ,)
    def to_dict( self ) -> Dict[str, any]:
        """Serialize to a dict, expanding the nested backbone and decoder configs."""
        output = copy.deepcopy(self.__dict__ )
        output['''backbone_config'''] = self.backbone_config.to_dict()
        output['''decoder_config'''] = self.decoder_config.to_dict()
        output['''model_type'''] = self.__class__.model_type
        return output
| 302 | 1 |
def solution( limit: int = 1000000 ):
    """Sum Euler's totient phi(n) for 2 <= n <= limit, using a prime sieve."""
    primes = set(range(3 , limit , 2 ) )
    primes.add(2 )
    for p in range(3 , limit , 2 ):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p , limit , p ) ) )
    phi = [float(n ) for n in range(limit + 1 )]
    for p in primes:
        for n in range(p , limit + 1 , p ):
            phi[n] *= 1 - 1 / p
    return int(sum(phi[2:] ) )
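# Hand-checkable case (illustrative): for limit = 10 the answer is
# phi(2) + ... + phi(10) = 1 + 2 + 2 + 4 + 2 + 6 + 4 + 6 + 4 = 31, i.e. the number
# of reduced proper fractions with denominator <= 10.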
if __name__ == "__main__":
print(f"""{solution() = }""")
| 9 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
from .camera import create_pan_cameras
from .pipeline_shap_e import ShapEPipeline
from .pipeline_shap_e_img2img import ShapEImgaImgPipeline
from .renderer import (
BoundingBoxVolume,
ImportanceRaySampler,
MLPNeRFModelOutput,
MLPNeRSTFModel,
ShapEParamsProjModel,
ShapERenderer,
StratifiedRaySampler,
VoidNeRFModel,
)
| 199 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
A_ : Optional[Any] =logging.get_logger(__name__)
A_ : Dict ={
"""facebook/convnextv2-tiny-1k-224""": """https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json""",
}
class ConvNextVaConfig(BackboneConfigMixin , PretrainedConfig ):
    model_type = '''convnextv2'''
    def __init__( self , num_channels=3 , patch_size=4 , num_stages=4 , hidden_sizes=None , depths=None , hidden_act="gelu" , initializer_range=0.02 , layer_norm_eps=1e-12 , drop_path_rate=0.0 , image_size=224 , out_features=None , out_indices=None , **kwargs , ):
        super().__init__(**kwargs )
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_stages = num_stages
        self.hidden_sizes = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
        self.depths = [3, 3, 9, 3] if depths is None else depths
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.drop_path_rate = drop_path_rate
        self.image_size = image_size
        self.stage_names = ['stem'] + [F'stage{idx}' for idx in range(1 , len(self.depths ) + 1 )]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features , out_indices=out_indices , stage_names=self.stage_names )
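# Minimal usage sketch (illustrative): ConvNextVaConfig() reproduces the defaults
# above (hidden_sizes [96, 192, 384, 768], depths [3, 3, 9, 3]); pass `depths` and
# `hidden_sizes` explicitly to describe other model variants.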
| 80 |
"""simple docstring"""
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
def test_patch_submodule():
import os as original_os
from os import path as original_path
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
    mock = '__test_patch_submodule_mock__'
    with patch_submodule(_test_patching , 'os.path.join' , mock ):
# Every way to access os.path.join must be patched, and the rest must stay untouched
# check os.path.join
assert isinstance(_test_patching.os , _PatchedModuleObj )
assert isinstance(_test_patching.os.path , _PatchedModuleObj )
assert _test_patching.os.path.join is mock
# check path.join
assert isinstance(_test_patching.path , _PatchedModuleObj )
assert _test_patching.path.join is mock
# check join
assert _test_patching.join is mock
# check that the other attributes are untouched
assert _test_patching.os.rename is original_rename
assert _test_patching.path.dirname is original_dirname
assert _test_patching.os.path.dirname is original_dirname
# Even renamed modules or objects must be patched
# check renamed_os.path.join
assert isinstance(_test_patching.renamed_os , _PatchedModuleObj )
assert isinstance(_test_patching.renamed_os.path , _PatchedModuleObj )
assert _test_patching.renamed_os.path.join is mock
# check renamed_path.join
assert isinstance(_test_patching.renamed_path , _PatchedModuleObj )
assert _test_patching.renamed_path.join is mock
# check renamed_join
assert _test_patching.renamed_join is mock
# check that the other attributes are untouched
assert _test_patching.renamed_os.rename is original_rename
assert _test_patching.renamed_path.dirname is original_dirname
assert _test_patching.renamed_os.path.dirname is original_dirname
    # check that everything is back to normal when the patch is over
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
def test_patch_submodule_builtin():
    assert _test_patching.open is open
    mock = '__test_patch_submodule_builtin_mock__'
    # _test_patching has "open" in its globals
    assert _test_patching.open is open
    with patch_submodule(_test_patching , 'open' , mock ):
        assert _test_patching.open is mock
    # check that everything is back to normal when the patch is over
    assert _test_patching.open is open
def test_patch_submodule_missing():
    # pandas.read_csv is not present in _test_patching
    mock = '__test_patch_submodule_missing_mock__'
    with patch_submodule(_test_patching , 'pandas.read_csv' , mock ):
pass
def test_patch_submodule_missing_builtin():
    # builtin should always be mocked even if they're not in the globals
    # in case they're loaded at one point
    mock = '__test_patch_submodule_missing_builtin_mock__'
    # _test_patching doesn't have "len" in its globals
    assert getattr(_test_patching , 'len' , None ) is None
    with patch_submodule(_test_patching , 'len' , mock ):
        assert _test_patching.len is mock
    assert _test_patching.len is len
def test_patch_submodule_start_and_stop():
    mock = '__test_patch_submodule_start_and_stop_mock__'
    patch = patch_submodule(_test_patching , 'open' , mock )
    assert _test_patching.open is open
    patch.start()
    assert _test_patching.open is mock
    patch.stop()
    assert _test_patching.open is open
def test_patch_submodule_successive():
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
    mock_join = '__test_patch_submodule_successive_join__'
    mock_dirname = '__test_patch_submodule_successive_dirname__'
    mock_rename = '__test_patch_submodule_successive_rename__'
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
    with patch_submodule(_test_patching , 'os.path.join' , mock_join ):
        with patch_submodule(_test_patching , 'os.rename' , mock_rename ):
            with patch_submodule(_test_patching , 'os.path.dirname' , mock_dirname ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
# try another order
    with patch_submodule(_test_patching , 'os.rename' , mock_rename ):
        with patch_submodule(_test_patching , 'os.path.join' , mock_join ):
            with patch_submodule(_test_patching , 'os.path.dirname' , mock_dirname ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
def test_patch_submodule_doesnt_exist():
    mock = '__test_patch_submodule_doesnt_exist_mock__'
    with patch_submodule(_test_patching , '__module_that_doesn_exist__.__attribute_that_doesn_exist__' , mock ):
pass
    with patch_submodule(_test_patching , 'os.__attribute_that_doesn_exist__' , mock ):
pass
| 80 | 1 |
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
pytestmark = pytest.mark.integration
@pytest.mark.parametrize('''path''' , ['''paws''', '''csv'''] )
def test_inspect_dataset(path , tmp_path ):
    inspect_dataset(path , tmp_path )
    script_name = path + '''.py'''
    assert script_name in os.listdir(tmp_path )
    assert "__pycache__" not in os.listdir(tmp_path )
@pytest.mark.filterwarnings('''ignore:inspect_metric is deprecated:FutureWarning''' )
@pytest.mark.filterwarnings('''ignore:metric_module_factory is deprecated:FutureWarning''' )
@pytest.mark.parametrize('''path''' , ['''accuracy'''] )
def test_inspect_metric(path , tmp_path ):
    inspect_metric(path , tmp_path )
    script_name = path + '''.py'''
    assert script_name in os.listdir(tmp_path )
    assert "__pycache__" not in os.listdir(tmp_path )
@pytest.mark.parametrize(
'''path, config_name, expected_splits''' , [
('''squad''', '''plain_text''', ['''train''', '''validation''']),
('''dalle-mini/wit''', '''dalle-mini--wit''', ['''train''']),
('''paws''', '''labeled_final''', ['''train''', '''test''', '''validation''']),
] , )
def test_get_dataset_config_info(path , config_name , expected_splits ):
    info = get_dataset_config_info(path , config_name=config_name )
    assert info.config_name == config_name
    assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
'''path, config_name, expected_exception''' , [
('''paws''', None, ValueError),
] , )
def test_get_dataset_config_info_error(path , config_name , expected_exception ):
    with pytest.raises(expected_exception ):
        get_dataset_config_info(path , config_name=config_name )
@pytest.mark.parametrize(
'''path, expected''' , [
('''squad''', '''plain_text'''),
('''acronym_identification''', '''default'''),
('''lhoestq/squad''', '''plain_text'''),
('''lhoestq/test''', '''default'''),
('''lhoestq/demo1''', '''lhoestq--demo1'''),
('''dalle-mini/wit''', '''dalle-mini--wit'''),
] , )
def test_get_dataset_config_names(path , expected ):
    config_names = get_dataset_config_names(path )
    assert expected in config_names
@pytest.mark.parametrize(
'''path, expected_configs, expected_splits_in_first_config''' , [
('''squad''', ['''plain_text'''], ['''train''', '''validation''']),
('''dalle-mini/wit''', ['''dalle-mini--wit'''], ['''train''']),
('''paws''', ['''labeled_final''', '''labeled_swap''', '''unlabeled_final'''], ['''train''', '''test''', '''validation''']),
] , )
def test_get_dataset_info(path , expected_configs , expected_splits_in_first_config ):
    infos = get_dataset_infos(path )
    assert list(infos.keys() ) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys() ) == expected_splits_in_first_config
@pytest.mark.parametrize(
'''path, expected_config, expected_splits''' , [
('''squad''', '''plain_text''', ['''train''', '''validation''']),
('''dalle-mini/wit''', '''dalle-mini--wit''', ['''train''']),
('''paws''', '''labeled_final''', ['''train''', '''test''', '''validation''']),
] , )
def test_get_dataset_split_names(path , expected_config , expected_splits ):
    infos = get_dataset_infos(path )
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
'''path, config_name, expected_exception''' , [
('''paws''', None, ValueError),
] , )
def test_get_dataset_split_names_error(path , config_name , expected_exception ):
    with pytest.raises(expected_exception ):
        get_dataset_split_names(path , config_name=config_name )
| 0 |
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class DPMSolverSinglestepSchedulerTest(SchedulerCommonTest ):
    scheduler_classes = (DPMSolverSinglestepScheduler,)
    forward_default_kwargs = (('num_inference_steps', 25),)

    def get_scheduler_config( self , **kwargs ):
        config = {
'num_train_timesteps': 1_0_0_0,
'beta_start': 0.0001,
'beta_end': 0.02,
'beta_schedule': 'linear',
'solver_order': 2,
'prediction_type': 'epsilon',
'thresholding': False,
'sample_max_value': 1.0,
'algorithm_type': 'dpmsolver++',
'solver_type': 'midpoint',
'lambda_min_clipped': -float('inf' ),
'variance_type': None,
}
        config.update(**kwargs )
        return config

    def check_over_configs( self , time_step=0 , **config ):
        kwargs = dict(self.forward_default_kwargs )
        num_inference_steps = kwargs.pop('num_inference_steps' , None )
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config )
            scheduler = scheduler_class(**scheduler_config )
            scheduler.set_timesteps(num_inference_steps )
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname )
                new_scheduler = scheduler_class.from_pretrained(tmpdirname )
                new_scheduler.set_timesteps(num_inference_steps )
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]
            output, new_output = sample, sample
            for t in range(time_step , time_step + scheduler.config.solver_order + 1 ):
                output = scheduler.step(output , t , sample , **kwargs ).prev_sample
                new_output = new_scheduler.step(new_output , t , sample , **kwargs ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
    def test_from_save_pretrained( self ):
        # no-op override (method name assumed from the common scheduler tests);
        # save/reload behaviour is exercised by check_over_configs above
        pass
    def check_over_forward( self , time_step=0 , **forward_kwargs ):
        kwargs = dict(self.forward_default_kwargs )
        num_inference_steps = kwargs.pop('num_inference_steps' , None )
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config )
            scheduler.set_timesteps(num_inference_steps )
            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname )
                new_scheduler = scheduler_class.from_pretrained(tmpdirname )
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps )
                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]
            output = scheduler.step(residual , time_step , sample , **kwargs ).prev_sample
            new_output = new_scheduler.step(residual , time_step , sample , **kwargs ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
    def full_loop( self , scheduler=None , **config ):
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config )
            scheduler = scheduler_class(**scheduler_config )
        # the scheduler is re-created unconditionally below, mirroring the source structure
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config )
        scheduler = scheduler_class(**scheduler_config )
        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps )
        for i, t in enumerate(scheduler.timesteps ):
            residual = model(sample , t )
            sample = scheduler.step(residual , t , sample ).prev_sample
        return sample
    def test_full_uneven_loop( self ):
        scheduler = DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
        num_inference_steps = 50
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps )
        # make sure that the first t is uneven
        for i, t in enumerate(scheduler.timesteps[3:] ):
            residual = model(sample , t )
            sample = scheduler.step(residual , t , sample ).prev_sample
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_mean.item() - 0.2574 ) < 1E-3
    def test_timesteps( self ):
        for timesteps in [25, 50, 100, 999, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps )
    def test_switch( self ):
        # make sure that iterating over schedulers with same config names gives same results
        # for defaults
        scheduler = DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
        sample = self.full_loop(scheduler=scheduler )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_mean.item() - 0.2791 ) < 1E-3
        scheduler = DEISMultistepScheduler.from_config(scheduler.config )
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config )
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config )
        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config )
        sample = self.full_loop(scheduler=scheduler )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_mean.item() - 0.2791 ) < 1E-3
    def test_thresholding( self ):
        self.check_over_configs(thresholding=False )
        for order in [1, 2, 3]:
            for solver_type in ["midpoint", "heun"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True , prediction_type=prediction_type , sample_max_value=threshold , algorithm_type='dpmsolver++' , solver_order=order , solver_type=solver_type , )
    def test_prediction_type( self ):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type )
    def test_solver_order_and_type( self ):
        for algorithm_type in ["dpmsolver", "dpmsolver++"]:
            for solver_type in ["midpoint", "heun"]:
                for order in [1, 2, 3]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            solver_order=order , solver_type=solver_type , prediction_type=prediction_type , algorithm_type=algorithm_type , )
                        sample = self.full_loop(
                            solver_order=order , solver_type=solver_type , prediction_type=prediction_type , algorithm_type=algorithm_type , )
                        assert not torch.isnan(sample ).any(), "Samples have nan numbers"
    def test_lower_order_final( self ):
        self.check_over_configs(lower_order_final=True )
        self.check_over_configs(lower_order_final=False )
    def test_lambda_min_clipped( self ):
        self.check_over_configs(lambda_min_clipped=-float('inf' ) )
        self.check_over_configs(lambda_min_clipped=-5.1 )
    def test_variance_type( self ):
        self.check_over_configs(variance_type=None )
        self.check_over_configs(variance_type='learned_range' )
    def test_inference_steps( self ):
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=num_inference_steps , time_step=0 )
    def test_full_loop_no_noise( self ):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_mean.item() - 0.2791 ) < 1E-3
    def test_full_loop_with_karras( self ):
        sample = self.full_loop(use_karras_sigmas=True )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_mean.item() - 0.2248 ) < 1E-3
    def test_full_loop_with_v_prediction( self ):
        sample = self.full_loop(prediction_type='v_prediction' )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_mean.item() - 0.1453 ) < 1E-3
    def test_full_loop_with_karras_and_v_prediction( self ):
        sample = self.full_loop(prediction_type='v_prediction' , use_karras_sigmas=True )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_mean.item() - 0.0649 ) < 1E-3
    def test_fp16_support( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True , dynamic_thresholding_ratio=0 )
        scheduler = scheduler_class(**scheduler_config )
        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps )
        for i, t in enumerate(scheduler.timesteps ):
            residual = model(sample , t )
            sample = scheduler.step(residual , t , sample ).prev_sample
        assert sample.dtype == torch.float16
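# Inference pattern exercised by `full_loop` above (illustrative): build the
# scheduler from a config dict, call `set_timesteps(n)`, then repeatedly take
# `scheduler.step(model_output, t, sample).prev_sample`, the same loop a
# diffusion pipeline runs at sampling time.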
| 322 | 0 |
'''simple docstring'''
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
logging.basicConfig(level=logging.INFO, format='''%(message)s''')
def column_reshape(input_array: np.ndarray ) -> np.ndarray:
    """Reshape a 1-D array into a single-column 2-D array."""
    return input_array.reshape((input_array.size, 1) )
def covariance_within_classes(features: np.ndarray , labels: np.ndarray , classes: int ) -> np.ndarray:
    """Average within-class covariance over all classes."""
    covariance_sum = np.nan
    for i in range(classes ):
        data = features[:, labels == i]
        data_mean = data.mean(1 )
        # Centralize the data of class i
        centered_data = data - column_reshape(data_mean )
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += np.dot(centered_data , centered_data.T )
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = np.dot(centered_data , centered_data.T )
    return covariance_sum / features.shape[1]
def covariance_between_classes(features: np.ndarray , labels: np.ndarray , classes: int ) -> np.ndarray:
    """Between-class covariance: class-size-weighted outer products of class-mean offsets."""
    general_data_mean = features.mean(1 )
    covariance_sum = np.nan
    for i in range(classes ):
        data = features[:, labels == i]
        device_data = data.shape[1]
        data_mean = data.mean(1 )
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += device_data * np.dot(
                column_reshape(data_mean ) - column_reshape(general_data_mean ) , (column_reshape(data_mean ) - column_reshape(general_data_mean )).T , )
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = device_data * np.dot(
                column_reshape(data_mean ) - column_reshape(general_data_mean ) , (column_reshape(data_mean ) - column_reshape(general_data_mean )).T , )
    return covariance_sum / features.shape[1]
def principal_component_analysis(features: np.ndarray , dimensions: int ) -> np.ndarray:
    """Project `features` onto the top `dimensions` principal components."""
    if features.any():
        data_mean = features.mean(1 )
        # Center the dataset
        centered_data = features - np.reshape(data_mean , (data_mean.size, 1) )
        covariance_matrix = np.dot(centered_data , centered_data.T ) / features.shape[1]
        _, eigenvectors = np.linalg.eigh(covariance_matrix )
        # Take all the columns in the reverse order (-1), and then takes only the first
        filtered_eigenvectors = eigenvectors[:, ::-1][:, 0:dimensions]
        # Project the database on the new space
        projected_data = np.dot(filtered_eigenvectors.T , features )
        logging.info('''Principal Component Analysis computed''' )
        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR , format='''%(message)s''' , force=True )
        logging.error('''Dataset empty''' )
        raise AssertionError
def linear_discriminant_analysis(features: np.ndarray , labels: np.ndarray , classes: int , dimensions: int ) -> np.ndarray:
    """Project `features` onto `dimensions` discriminant axes (requires classes > dimensions)."""
    assert classes > dimensions
    # Check if features have been already loaded
    if features.any():
        _, eigenvectors = eigh(
            covariance_between_classes(features , labels , classes ) , covariance_within_classes(features , labels , classes ) , )
        filtered_eigenvectors = eigenvectors[:, ::-1][:, :dimensions]
        svd_matrix, _, _ = np.linalg.svd(filtered_eigenvectors )
        filtered_svd_matrix = svd_matrix[:, 0:dimensions]
        projected_data = np.dot(filtered_svd_matrix.T , features )
        logging.info('''Linear Discriminant Analysis computed''' )
        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR , format='''%(message)s''' , force=True )
        logging.error('''Dataset empty''' )
        raise AssertionError
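# Shape note (illustrative): with `features` of shape (n_features, n_samples), both
# projections above return arrays of shape (dimensions, n_samples); columns remain
# samples, rows become the new principal or discriminant axes.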
def test_linear_discriminant_analysis() -> None:
    # Create dummy dataset with 2 classes and 3 features
    features = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]] )
    labels = np.array([0, 0, 0, 1, 1] )
    classes = 2
    dimensions = 2
    # Assert that the function raises an AssertionError if dimensions > classes
    with pytest.raises(AssertionError ) as error_info:
        projected_data = linear_discriminant_analysis(
            features , labels , classes , dimensions )
        if isinstance(projected_data , np.ndarray ):
            raise AssertionError(
                '''Did not raise AssertionError for dimensions > classes''' )
        assert error_info.type is AssertionError
def test_principal_component_analysis() -> None:
    features = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]] )
    dimensions = 2
    expected_output = np.array([[6.92820323, 8.66025404, 10.39230485], [3.0, 3.0, 3.0]] )
    with pytest.raises(AssertionError ) as error_info:
        output = principal_component_analysis(features , dimensions )
        if not np.allclose(expected_output , output ):
            raise AssertionError
        assert error_info.type is AssertionError
if __name__ == "__main__":
import doctest
doctest.testmod() | 219 |
'''simple docstring'''
from __future__ import annotations
__snake_case = list[list[int]]
# assigning initial values to the grid
__snake_case = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
__snake_case = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def is_safe(grid: Matrix , row: int , column: int , n: int ) -> bool:
    """Return True if `n` can be placed at (row, column) without a conflict."""
    for i in range(9 ):
        if grid[row][i] == n or grid[i][column] == n:
            return False
    for i in range(3 ):
        for j in range(3 ):
            if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
                return False
    return True
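# Box arithmetic above, worked through (illustrative): for row = 5,
# row - row % 3 = 3, so the inner loops scan rows 3..5 (and likewise columns),
# i.e. exactly the 3x3 box that contains the cell.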
def find_empty_location(grid: Matrix ) -> tuple[int, int] | None:
    """Find the next empty cell (value 0), scanning row-major."""
    for i in range(9 ):
        for j in range(9 ):
            if grid[i][j] == 0:
                return i, j
    return None
def sudoku(grid: Matrix ) -> Matrix | None:
    """Solve the grid in place by backtracking; return it, or None if unsolvable."""
    if location := find_empty_location(grid ):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid
    for digit in range(1 , 10 ):
        if is_safe(grid , row , column , digit ):
            grid[row][column] = digit
            if sudoku(grid ) is not None:
                return grid
            grid[row][column] = 0
    return None
def print_solution(grid: Matrix ) -> None:
    """Pretty-print the grid."""
    for row in grid:
        for cell in row:
            print(cell , end=''' ''' )
        print()
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print('''\nExample grid:\n''' + '''=''' * 20)
print_solution(example_grid)
print('''\nExample grid solution:''')
__snake_case = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print('''Cannot find a solution.''') | 219 | 1 |
"""simple docstring"""
def gray_code_sequence(bit_count: int ) ->list:
    """Return the Gray code sequence for `bit_count` bits as integers."""
    if bit_count < 0:
        raise ValueError("The given input must be positive" )
    # get the generated string sequence
    sequence = gray_code_sequence_string(bit_count )
    #
    # convert them to integers
    for i in range(len(sequence ) ):
        sequence[i] = int(sequence[i] , 2 )
    return sequence
def gray_code_sequence_string(bit_count: int ) ->list:
    """Recursively build the Gray code sequence as binary strings."""
    if bit_count == 0:
        return ["0"]
    if bit_count == 1:
        return ["0", "1"]
    seq_len = 1 << bit_count  # defines the length of the sequence
    # 1 << n is equivalent to 2^n
    # recursive answer will generate answer for n-1 bits
    smaller_sequence = gray_code_sequence_string(bit_count - 1 )
    sequence = []
    # append 0 to first half of the smaller sequence generated
    for i in range(seq_len // 2 ):
        generated_no = '''0''' + smaller_sequence[i]
        sequence.append(generated_no )
    # append 1 to second half ... start from the end of the list
    for i in reversed(range(seq_len // 2 ) ):
        generated_no = '''1''' + smaller_sequence[i]
        sequence.append(generated_no )
    return sequence
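# Worked example (illustrative): gray_code_sequence(2) returns [0, 1, 3, 2],
# i.e. the strings 00, 01, 11, 10, where consecutive entries differ in one bit.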
if __name__ == "__main__":
import doctest
doctest.testmod()
| 105 |
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        """Dummy stand-in so the module imports without vision dependencies."""

        @staticmethod
        def open(*args , **kwargs ):
            pass
@is_pipeline_test
@require_vision
@require_torch
class ZeroShotObjectDetectionPipelineTests(unittest.TestCase ):
    model_mapping = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
    def get_test_pipeline( self , model , tokenizer , processor ):
        object_detector = pipeline(
            '''zero-shot-object-detection''' , model='''hf-internal-testing/tiny-random-owlvit-object-detection''' )
        examples = [
            {
                '''image''': '''./tests/fixtures/tests_samples/COCO/000000039769.png''',
                '''candidate_labels''': ['''cat''', '''remote''', '''couch'''],
            }
        ]
        return object_detector, examples
    def run_pipeline_test( self , object_detector , examples ):
        outputs = object_detector(examples[0] , threshold=0.0 )
        n = len(outputs )
        self.assertGreater(n , 0 )
        self.assertEqual(
            outputs , [
                {
                    '''score''': ANY(float ),
                    '''label''': ANY(str ),
                    '''box''': {'''xmin''': ANY(int ), '''ymin''': ANY(int ), '''xmax''': ANY(int ), '''ymax''': ANY(int )},
                }
                for i in range(n )
            ] , )
@require_tf
@unittest.skip('''Zero Shot Object Detection not implemented in TF''' )
    def test_small_model_tf( self ):
pass
@require_torch
    def test_small_model_pt( self ):
        object_detector = pipeline(
            '''zero-shot-object-detection''' , model='''hf-internal-testing/tiny-random-owlvit-object-detection''' )
        outputs = object_detector(
            '''./tests/fixtures/tests_samples/COCO/000000039769.png''' , candidate_labels=['''cat''', '''remote''', '''couch'''] , threshold=0.64 , )
self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
{'''score''': 0.72_35, '''label''': '''cat''', '''box''': {'''xmin''': 2_0_4, '''ymin''': 1_6_7, '''xmax''': 2_3_2, '''ymax''': 1_9_0}},
{'''score''': 0.72_18, '''label''': '''remote''', '''box''': {'''xmin''': 2_0_4, '''ymin''': 1_6_7, '''xmax''': 2_3_2, '''ymax''': 1_9_0}},
{'''score''': 0.71_84, '''label''': '''couch''', '''box''': {'''xmin''': 2_0_4, '''ymin''': 1_6_7, '''xmax''': 2_3_2, '''ymax''': 1_9_0}},
{'''score''': 0.67_48, '''label''': '''remote''', '''box''': {'''xmin''': 5_7_1, '''ymin''': 8_3, '''xmax''': 5_9_8, '''ymax''': 1_0_3}},
{'''score''': 0.66_56, '''label''': '''cat''', '''box''': {'''xmin''': 5_7_1, '''ymin''': 8_3, '''xmax''': 5_9_8, '''ymax''': 1_0_3}},
{'''score''': 0.66_14, '''label''': '''couch''', '''box''': {'''xmin''': 5_7_1, '''ymin''': 8_3, '''xmax''': 5_9_8, '''ymax''': 1_0_3}},
{'''score''': 0.64_56, '''label''': '''remote''', '''box''': {'''xmin''': 4_9_4, '''ymin''': 1_0_5, '''xmax''': 5_2_1, '''ymax''': 1_2_7}},
{'''score''': 0.6_42, '''label''': '''remote''', '''box''': {'''xmin''': 6_7, '''ymin''': 2_7_4, '''xmax''': 9_3, '''ymax''': 2_9_7}},
{'''score''': 0.64_19, '''label''': '''cat''', '''box''': {'''xmin''': 4_9_4, '''ymin''': 1_0_5, '''xmax''': 5_2_1, '''ymax''': 1_2_7}},
] , )
        outputs = object_detector(
[
{
'''image''': '''./tests/fixtures/tests_samples/COCO/000000039769.png''',
'''candidate_labels''': ['''cat''', '''remote''', '''couch'''],
}
] , threshold=0.64 , )
self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
[
{'''score''': 0.72_35, '''label''': '''cat''', '''box''': {'''xmin''': 2_0_4, '''ymin''': 1_6_7, '''xmax''': 2_3_2, '''ymax''': 1_9_0}},
{'''score''': 0.72_18, '''label''': '''remote''', '''box''': {'''xmin''': 2_0_4, '''ymin''': 1_6_7, '''xmax''': 2_3_2, '''ymax''': 1_9_0}},
{'''score''': 0.71_84, '''label''': '''couch''', '''box''': {'''xmin''': 2_0_4, '''ymin''': 1_6_7, '''xmax''': 2_3_2, '''ymax''': 1_9_0}},
{'''score''': 0.67_48, '''label''': '''remote''', '''box''': {'''xmin''': 5_7_1, '''ymin''': 8_3, '''xmax''': 5_9_8, '''ymax''': 1_0_3}},
{'''score''': 0.66_56, '''label''': '''cat''', '''box''': {'''xmin''': 5_7_1, '''ymin''': 8_3, '''xmax''': 5_9_8, '''ymax''': 1_0_3}},
{'''score''': 0.66_14, '''label''': '''couch''', '''box''': {'''xmin''': 5_7_1, '''ymin''': 8_3, '''xmax''': 5_9_8, '''ymax''': 1_0_3}},
{'''score''': 0.64_56, '''label''': '''remote''', '''box''': {'''xmin''': 4_9_4, '''ymin''': 1_0_5, '''xmax''': 5_2_1, '''ymax''': 1_2_7}},
{'''score''': 0.6_42, '''label''': '''remote''', '''box''': {'''xmin''': 6_7, '''ymin''': 2_7_4, '''xmax''': 9_3, '''ymax''': 2_9_7}},
{'''score''': 0.64_19, '''label''': '''cat''', '''box''': {'''xmin''': 4_9_4, '''ymin''': 1_0_5, '''xmax''': 5_2_1, '''ymax''': 1_2_7}},
]
] , )
@require_torch
@slow
    def test_large_model_pt( self ):
        object_detector = pipeline('''zero-shot-object-detection''' )
        outputs = object_detector(
            '''http://images.cocodataset.org/val2017/000000039769.jpg''' , candidate_labels=['''cat''', '''remote''', '''couch'''] , )
self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
{'''score''': 0.28_68, '''label''': '''cat''', '''box''': {'''xmin''': 3_2_4, '''ymin''': 2_0, '''xmax''': 6_4_0, '''ymax''': 3_7_3}},
{'''score''': 0.2_77, '''label''': '''remote''', '''box''': {'''xmin''': 4_0, '''ymin''': 7_2, '''xmax''': 1_7_7, '''ymax''': 1_1_5}},
{'''score''': 0.25_37, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 5_5, '''xmax''': 3_1_5, '''ymax''': 4_7_2}},
{'''score''': 0.14_74, '''label''': '''remote''', '''box''': {'''xmin''': 3_3_5, '''ymin''': 7_4, '''xmax''': 3_7_1, '''ymax''': 1_8_7}},
{'''score''': 0.12_08, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 6_4_2, '''ymax''': 4_7_6}},
] , )
        outputs = object_detector(
[
{
'''image''': '''http://images.cocodataset.org/val2017/000000039769.jpg''',
'''candidate_labels''': ['''cat''', '''remote''', '''couch'''],
},
{
'''image''': '''http://images.cocodataset.org/val2017/000000039769.jpg''',
'''candidate_labels''': ['''cat''', '''remote''', '''couch'''],
},
] , )
self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
[
{'''score''': 0.28_68, '''label''': '''cat''', '''box''': {'''xmin''': 3_2_4, '''ymin''': 2_0, '''xmax''': 6_4_0, '''ymax''': 3_7_3}},
{'''score''': 0.2_77, '''label''': '''remote''', '''box''': {'''xmin''': 4_0, '''ymin''': 7_2, '''xmax''': 1_7_7, '''ymax''': 1_1_5}},
{'''score''': 0.25_37, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 5_5, '''xmax''': 3_1_5, '''ymax''': 4_7_2}},
{'''score''': 0.14_74, '''label''': '''remote''', '''box''': {'''xmin''': 3_3_5, '''ymin''': 7_4, '''xmax''': 3_7_1, '''ymax''': 1_8_7}},
{'''score''': 0.12_08, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 6_4_2, '''ymax''': 4_7_6}},
],
[
{'''score''': 0.28_68, '''label''': '''cat''', '''box''': {'''xmin''': 3_2_4, '''ymin''': 2_0, '''xmax''': 6_4_0, '''ymax''': 3_7_3}},
{'''score''': 0.2_77, '''label''': '''remote''', '''box''': {'''xmin''': 4_0, '''ymin''': 7_2, '''xmax''': 1_7_7, '''ymax''': 1_1_5}},
{'''score''': 0.25_37, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 5_5, '''xmax''': 3_1_5, '''ymax''': 4_7_2}},
{'''score''': 0.14_74, '''label''': '''remote''', '''box''': {'''xmin''': 3_3_5, '''ymin''': 7_4, '''xmax''': 3_7_1, '''ymax''': 1_8_7}},
{'''score''': 0.12_08, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 6_4_2, '''ymax''': 4_7_6}},
],
] , )
@require_tf
@unittest.skip('''Zero Shot Object Detection not implemented in TF''' )
    def test_large_model_tf( self ):
pass
@require_torch
@slow
    def test_threshold( self ):
        threshold = 0.2
        object_detector = pipeline('''zero-shot-object-detection''' )
        outputs = object_detector(
            '''http://images.cocodataset.org/val2017/000000039769.jpg''' , candidate_labels=['''cat''', '''remote''', '''couch'''] , threshold=threshold , )
self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
{'''score''': 0.28_68, '''label''': '''cat''', '''box''': {'''xmin''': 3_2_4, '''ymin''': 2_0, '''xmax''': 6_4_0, '''ymax''': 3_7_3}},
{'''score''': 0.2_77, '''label''': '''remote''', '''box''': {'''xmin''': 4_0, '''ymin''': 7_2, '''xmax''': 1_7_7, '''ymax''': 1_1_5}},
{'''score''': 0.25_37, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 5_5, '''xmax''': 3_1_5, '''ymax''': 4_7_2}},
] , )
@require_torch
@slow
    def test_top_k( self ):
        top_k = 2
        object_detector = pipeline('''zero-shot-object-detection''' )
        outputs = object_detector(
            '''http://images.cocodataset.org/val2017/000000039769.jpg''' , candidate_labels=['''cat''', '''remote''', '''couch'''] , top_k=top_k , )
self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
{'''score''': 0.28_68, '''label''': '''cat''', '''box''': {'''xmin''': 3_2_4, '''ymin''': 2_0, '''xmax''': 6_4_0, '''ymax''': 3_7_3}},
{'''score''': 0.2_77, '''label''': '''remote''', '''box''': {'''xmin''': 4_0, '''ymin''': 7_2, '''xmax''': 1_7_7, '''ymax''': 1_1_5}},
] , ) | 262 | 0 |
def z_function(input_str: str) -> list[int]:
    """For each index i, length of the longest common prefix of input_str and input_str[i:]."""
    z_result = [0 for i in range(len(input_str))]
    # initialize interval's left pointer and right pointer
    left_pointer, right_pointer = 0, 0
    for i in range(1 , len(input_str)):
        # case when current index is inside the interval
        if i <= right_pointer:
            min_edge = min(right_pointer - i + 1 , z_result[i - left_pointer])
            z_result[i] = min_edge
        while go_next(i , z_result , input_str):
            z_result[i] += 1
        # if new index's result gives us more right interval,
        # we've to update left_pointer and right_pointer
        if i + z_result[i] - 1 > right_pointer:
            left_pointer, right_pointer = i, i + z_result[i] - 1
    return z_result
def go_next(i: int , z_result: list[int] , s: str) -> bool:
    """Check whether the current matched prefix can be extended by one character."""
    return i + z_result[i] < len(s) and s[z_result[i]] == s[i + z_result[i]]
def find_pattern(pattern: str , input_str: str) -> int:
    """Count occurrences of `pattern` in `input_str` using the Z-function."""
    answer = 0
    # concatenate 'pattern' and 'input_str' and call z_function
    # with concatenated string
    z_result = z_function(pattern + input_str)
    for val in z_result:
        # if the value is at least the length of the pattern string,
        # that index is the starting position of a substring
        # equal to the pattern string
        if val >= len(pattern):
            answer += 1
    return answer
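# Worked example (illustrative): z_function("abacaba") gives [0, 0, 1, 0, 3, 0, 1],
# and find_pattern("aba", "abacaba") returns 2 via the pattern + text concatenation.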
if __name__ == "__main__":
import doctest
doctest.testmod() | 151 |
from __future__ import annotations
import unittest
from transformers import DebertaVaConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
TFDebertaVaModel,
)
class TFDebertaVaModelTester:
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , relative_attention=False , position_biased_input=True , pos_att_type="None" , num_labels=3 , num_choices=4 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope
def _lowerCamelCase ( self :List[Any] ) -> List[Any]:
__UpperCamelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCamelCase : Tuple = None
if self.use_input_mask:
__UpperCamelCase : Any = random_attention_mask([self.batch_size, self.seq_length] )
__UpperCamelCase : List[Any] = None
if self.use_token_type_ids:
__UpperCamelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__UpperCamelCase : Union[str, Any] = None
__UpperCamelCase : Optional[int] = None
__UpperCamelCase : List[Any] = None
if self.use_labels:
__UpperCamelCase : int = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCamelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__UpperCamelCase : List[str] = DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , initializer_range=self.initializer_range , return_dict=a , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDebertaV2Model(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        inputs = [input_ids, input_mask]
        result = model(inputs)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDebertaV2ForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFDebertaV2ForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFDebertaV2ForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDebertaV2ForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
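

# Editor's note (illustrative sketch, not from the original file): the tester above only
# builds configs/inputs and asserts on output shapes; it is driven by the TestCase below.
# It can also be run standalone, e.g.
#
#     suite = unittest.TestLoader().loadTestsFromTestCase(TFDebertaV2ModelTest)
#     unittest.TextTestRunner(verbosity=2).run(suite)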
@require_tf
class TFDebertaV2ModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDebertaV2Model,
            TFDebertaV2ForMaskedLM,
            TFDebertaV2ForQuestionAnswering,
            TFDebertaV2ForSequenceClassification,
            TFDebertaV2ForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDebertaV2Model,
            "fill-mask": TFDebertaV2ForMaskedLM,
            "question-answering": TFDebertaV2ForQuestionAnswering,
            "text-classification": TFDebertaV2ForSequenceClassification,
            "token-classification": TFDebertaV2ForTokenClassification,
            "zero-shot": TFDebertaV2ForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDebertaV2ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaV2Config, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = TFDebertaV2Model.from_pretrained("kamalkraj/deberta-v2-xlarge")
        self.assertIsNotNone(model)
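

# Editor's note (sketch; assumes the standard transformers repository layout): a focused run
# of the checks above is simply
#
#     python -m pytest tests/models/deberta_v2/test_modeling_tf_deberta_v2.py -k "masked_lm"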
@require_tf
class TFDeBERTaV2ModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = TFDebertaV2Model.from_pretrained("kamalkraj/deberta-v2-xlarge")
        input_ids = tf.constant([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        # expected output slice; kept named `a` so the original assert on the next line still applies
        a = tf.constant(
            [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]]
        )
tf.debugging.assert_near(output[:, 1:4, 1:4] , a , atol=1E-4 ) | 151 | 1 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_mobilevit_config(mobilevit_name):
    config = MobileViTConfig()

    # size of the architecture
    if "mobilevit_s" in mobilevit_name:
        config.hidden_sizes = [144, 192, 240]
        config.neck_hidden_sizes = [16, 32, 64, 96, 128, 160, 640]
    elif "mobilevit_xs" in mobilevit_name:
        config.hidden_sizes = [96, 120, 144]
        config.neck_hidden_sizes = [16, 32, 48, 64, 80, 96, 384]
    elif "mobilevit_xxs" in mobilevit_name:
        config.hidden_sizes = [64, 80, 96]
        config.neck_hidden_sizes = [16, 16, 24, 48, 64, 80, 320]
        config.hidden_dropout_prob = 0.05
        config.expand_ratio = 2.0

    if mobilevit_name.startswith("deeplabv3_"):
        config.image_size = 512
        config.output_stride = 16
        config.num_labels = 21
        filename = "pascal-voc-id2label.json"
    else:
        config.num_labels = 1000
        filename = "imagenet-1k-id2label.json"

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
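

# Editor's note (summary of the branches above, for orientation): the s/xs/xxs variants only
# differ in channel widths; "mobilevit_xxs" additionally lowers dropout to 0.05 and the
# inverted-residual expand ratio to 2.0, while any "deeplabv3_*" name swaps the ImageNet head
# (1000 labels) for a 21-class PASCAL VOC segmentation head at 512x512 resolution.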
def rename_key(name, base_model=False):
    for i in range(1, 6):
        if f"layer_{i}." in name:
            name = name.replace(f"layer_{i}.", f"encoder.layer.{i - 1}.")

    if "conv_1." in name:
        name = name.replace("conv_1.", "conv_stem.")
    if ".block." in name:
        name = name.replace(".block.", ".")
    if "exp_1x1" in name:
        name = name.replace("exp_1x1", "expand_1x1")
    if "red_1x1" in name:
        name = name.replace("red_1x1", "reduce_1x1")
    if ".local_rep.conv_3x3." in name:
        name = name.replace(".local_rep.conv_3x3.", ".conv_kxk.")
    if ".local_rep.conv_1x1." in name:
        name = name.replace(".local_rep.conv_1x1.", ".conv_1x1.")
    if ".norm." in name:
        name = name.replace(".norm.", ".normalization.")
    if ".conv." in name:
        name = name.replace(".conv.", ".convolution.")
    if ".conv_proj." in name:
        name = name.replace(".conv_proj.", ".conv_projection.")

    for i in range(0, 2):
        for j in range(0, 4):
            if f".{i}.{j}." in name:
                name = name.replace(f".{i}.{j}.", f".{i}.layer.{j}.")

    for i in range(2, 6):
        for j in range(0, 4):
            if f".{i}.{j}." in name:
                name = name.replace(f".{i}.{j}.", f".{i}.")

    if "expand_1x1" in name:
        name = name.replace("expand_1x1", "downsampling_layer.expand_1x1")
    if "conv_3x3" in name:
        name = name.replace("conv_3x3", "downsampling_layer.conv_3x3")
    if "reduce_1x1" in name:
        name = name.replace("reduce_1x1", "downsampling_layer.reduce_1x1")

    for i in range(2, 5):
        if f".global_rep.{i}.weight" in name:
            name = name.replace(f".global_rep.{i}.weight", ".layernorm.weight")
        if f".global_rep.{i}.bias" in name:
            name = name.replace(f".global_rep.{i}.bias", ".layernorm.bias")

    if ".global_rep." in name:
        name = name.replace(".global_rep.", ".transformer.")
    if ".pre_norm_mha.0." in name:
        name = name.replace(".pre_norm_mha.0.", ".layernorm_before.")
    if ".pre_norm_mha.1.out_proj." in name:
        name = name.replace(".pre_norm_mha.1.out_proj.", ".attention.output.dense.")
    if ".pre_norm_ffn.0." in name:
        name = name.replace(".pre_norm_ffn.0.", ".layernorm_after.")
    if ".pre_norm_ffn.1." in name:
        name = name.replace(".pre_norm_ffn.1.", ".intermediate.dense.")
    if ".pre_norm_ffn.4." in name:
        name = name.replace(".pre_norm_ffn.4.", ".output.dense.")
    if ".transformer." in name:
        name = name.replace(".transformer.", ".transformer.layer.")
    if ".aspp_layer." in name:
        name = name.replace(".aspp_layer.", ".")
    if ".aspp_pool." in name:
        name = name.replace(".aspp_pool.", ".")
    if "seg_head." in name:
        name = name.replace("seg_head.", "segmentation_head.")
    if "segmentation_head.classifier.classifier." in name:
        name = name.replace("segmentation_head.classifier.classifier.", "segmentation_head.classifier.")
    if "classifier.fc." in name:
        name = name.replace("classifier.fc.", "classifier.")
    elif (not base_model) and ("segmentation_head." not in name):
        name = "mobilevit." + name

    return name
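

# Editor's note (illustrative): `rename_key` is a pure string-to-string mapping applied one
# checkpoint key at a time; e.g. a key starting with "conv_1." is rerouted to "conv_stem.",
# ".block." is collapsed away, and non-segmentation keys finally gain the "mobilevit." prefix.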
def convert_state_dict(orig_state_dict, model, base_model=False):
    if base_model:
        model_prefix = ""
    else:
        model_prefix = "mobilevit."

    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if key[:8] == "encoder.":
            key = key[8:]

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[0][6:]) - 1
            transformer_num = int(key_split[3])
            layer = model.get_submodule(f"{model_prefix}encoder.layer.{layer_num}")
            dim = layer.transformer.layer[transformer_num].attention.attention.all_head_size
            prefix = (
                f"{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention."
            )
            if "weight" in key:
                orig_state_dict[prefix + "query.weight"] = val[:dim, :]
                orig_state_dict[prefix + "key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[prefix + "value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[prefix + "query.bias"] = val[:dim]
                orig_state_dict[prefix + "key.bias"] = val[dim : dim * 2]
                orig_state_dict[prefix + "value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key, base_model)] = val

    return orig_state_dict
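

# Editor's sketch (hypothetical dim of 64, not from the original script): the qkv branch
# above slices one fused (3 * dim, dim) projection into query/key/value thirds.
def _demo_qkv_split() -> None:
    dim = 64
    fused = torch.randn(3 * dim, dim)
    q, k, v = fused[:dim, :], fused[dim : dim * 2, :], fused[-dim:, :]
    assert q.shape == k.shape == v.shape == (dim, dim)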
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_movilevit_checkpoint(mobilevit_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    """
    Copy/paste/tweak the original model's weights into our MobileViT structure.
    """
    config = get_mobilevit_config(mobilevit_name)

    # load original state_dict
    checkpoint = torch.load(checkpoint_path, map_location="cpu")

    # load 🤗 model
    if mobilevit_name.startswith("deeplabv3_"):
        model = MobileViTForSemanticSegmentation(config).eval()
    else:
        model = MobileViTForImageClassification(config).eval()

    new_state_dict = convert_state_dict(checkpoint, model)
    model.load_state_dict(new_state_dict)

    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits = outputs.logits

    if mobilevit_name.startswith("deeplabv3_"):
        assert logits.shape == (1, 21, 32, 32)

        if mobilevit_name == "deeplabv3_mobilevit_s":
            expected_logits = torch.tensor(
                [
                    [[6.2065, 6.1292, 6.2070], [6.1079, 6.1254, 6.1747], [6.0042, 6.1071, 6.1034]],
                    [[-6.9253, -6.8653, -7.0398], [-7.3218, -7.3983, -7.3670], [-7.1961, -7.2482, -7.1569]],
                    [[-4.4723, -4.4348, -4.3769], [-5.3629, -5.4632, -5.4598], [-5.1587, -5.3402, -5.5059]],
                ]
            )
        elif mobilevit_name == "deeplabv3_mobilevit_xs":
            expected_logits = torch.tensor(
                [
                    [[5.4449, 5.5733, 5.6314], [5.1815, 5.3930, 5.5963], [5.1656, 5.4333, 5.4853]],
                    [[-9.4423, -9.7766, -9.6714], [-9.1581, -9.5720, -9.5519], [-9.1006, -9.6458, -9.5703]],
                    [[-7.7721, -7.3716, -7.1583], [-8.4599, -8.0624, -7.7944], [-8.4172, -7.8366, -7.5025]],
                ]
            )
        elif mobilevit_name == "deeplabv3_mobilevit_xxs":
            expected_logits = torch.tensor(
                [
                    [[6.9811, 6.9743, 7.3123], [7.1777, 7.1931, 7.3938], [7.5633, 7.8050, 7.8901]],
                    [[-10.5536, -10.2332, -10.2924], [-10.2336, -9.8624, -9.5964], [-10.8840, -10.8158, -10.6659]],
                    [[-3.4938, -3.0631, -2.8620], [-3.4205, -2.8135, -2.6875], [-3.4179, -2.7945, -2.8750]],
                ]
            )
        else:
            raise ValueError(f"Unknown mobilevit_name: {mobilevit_name}")

        assert torch.allclose(logits[0, :3, :3, :3], expected_logits, atol=1e-4)
    else:
        assert logits.shape == (1, 1000)

        if mobilevit_name == "mobilevit_s":
            expected_logits = torch.tensor([-0.9866, 0.2392, -1.1241])
        elif mobilevit_name == "mobilevit_xs":
            expected_logits = torch.tensor([-2.4761, -0.9399, -1.9587])
        elif mobilevit_name == "mobilevit_xxs":
            expected_logits = torch.tensor([-1.9364, -1.2327, -0.4653])
        else:
            raise ValueError(f"Unknown mobilevit_name: {mobilevit_name}")

        assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {mobilevit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model_mapping = {
            "mobilevit_s": "mobilevit-small",
            "mobilevit_xs": "mobilevit-x-small",
            "mobilevit_xxs": "mobilevit-xx-small",
            "deeplabv3_mobilevit_s": "deeplabv3-mobilevit-small",
            "deeplabv3_mobilevit_xs": "deeplabv3-mobilevit-x-small",
            "deeplabv3_mobilevit_xxs": "deeplabv3-mobilevit-xx-small",
        }

        print("Pushing to the hub...")
        model_name = model_mapping[mobilevit_name]
        image_processor.push_to_hub(model_name, organization="apple")
        model.push_to_hub(model_name, organization="apple")
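

# Example invocation (editor's note; paths and the script filename are placeholders):
#
#     python convert_mlcvnets_to_pytorch.py --mobilevit_name mobilevit_s \
#         --checkpoint_path ./mobilevit_s.pt --pytorch_dump_folder_path ./mobilevit-small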
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--mobilevit_name""",
default="""mobilevit_s""",
type=str,
help=(
"""Name of the MobileViT model you'd like to convert. Should be one of 'mobilevit_s', 'mobilevit_xs',"""
""" 'mobilevit_xxs', 'deeplabv3_mobilevit_s', 'deeplabv3_mobilevit_xs', 'deeplabv3_mobilevit_xxs'."""
),
)
parser.add_argument(
"""--checkpoint_path""", required=True, type=str, help="""Path to the original state dict (.pt file)."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
    args = parser.parse_args()
convert_movilevit_checkpoint(
args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 302 |
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class PriorTransformerOutput(BaseOutput):
    """Output of [`PriorTransformer`]: the predicted CLIP image embedding."""

    predicted_image_embedding: torch.FloatTensor
class PriorTransformer(ModelMixin, ConfigMixin):
    """A prior transformer as used by unCLIP-style pipelines to predict CLIP image embeddings."""

    @register_to_config
    def __init__(
        self,
        num_attention_heads: int = 32,
        attention_head_dim: int = 64,
        num_layers: int = 20,
        embedding_dim: int = 768,
        num_embeddings=77,
        additional_embeddings=4,
        dropout: float = 0.0,
        time_embed_act_fn: str = "silu",
        norm_in_type: Optional[str] = None,
        embedding_proj_norm_type: Optional[str] = None,
        encoder_hid_proj_type: Optional[str] = "linear",
        added_emb_type: Optional[str] = "prd",
        time_embed_dim: Optional[int] = None,
        embedding_proj_dim: Optional[int] = None,
        clip_embed_dim: Optional[int] = None,
    ):
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim
        self.additional_embeddings = additional_embeddings

        time_embed_dim = time_embed_dim or inner_dim
        embedding_proj_dim = embedding_proj_dim or embedding_dim
        clip_embed_dim = clip_embed_dim or embedding_dim

        self.time_proj = Timesteps(inner_dim, True, 0)
        self.time_embedding = TimestepEmbedding(inner_dim, time_embed_dim, out_dim=inner_dim, act_fn=time_embed_act_fn)

        self.proj_in = nn.Linear(embedding_dim, inner_dim)

        if embedding_proj_norm_type is None:
            self.embedding_proj_norm = None
        elif embedding_proj_norm_type == "layer":
            self.embedding_proj_norm = nn.LayerNorm(embedding_proj_dim)
        else:
            raise ValueError(f"unsupported embedding_proj_norm_type: {embedding_proj_norm_type}")

        self.embedding_proj = nn.Linear(embedding_proj_dim, inner_dim)

        if encoder_hid_proj_type is None:
            self.encoder_hidden_states_proj = None
        elif encoder_hid_proj_type == "linear":
            self.encoder_hidden_states_proj = nn.Linear(embedding_dim, inner_dim)
        else:
            raise ValueError(f"unsupported encoder_hid_proj_type: {encoder_hid_proj_type}")

        self.positional_embedding = nn.Parameter(torch.zeros(1, num_embeddings + additional_embeddings, inner_dim))

        if added_emb_type == "prd":
            self.prd_embedding = nn.Parameter(torch.zeros(1, 1, inner_dim))
        elif added_emb_type is None:
            self.prd_embedding = None
        else:
            raise ValueError(
                f"`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`."
            )

        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim, num_attention_heads, attention_head_dim, dropout=dropout, activation_fn="gelu", attention_bias=True
                )
                for d in range(num_layers)
            ]
        )

        if norm_in_type == "layer":
            self.norm_in = nn.LayerNorm(inner_dim)
        elif norm_in_type is None:
            self.norm_in = None
        else:
            raise ValueError(f"Unsupported norm_in_type: {norm_in_type}.")

        self.norm_out = nn.LayerNorm(inner_dim)
        self.proj_to_clip_embeddings = nn.Linear(inner_dim, clip_embed_dim)

        causal_attention_mask = torch.full(
            [num_embeddings + additional_embeddings, num_embeddings + additional_embeddings], -10000.0
        )
        causal_attention_mask.triu_(1)
        causal_attention_mask = causal_attention_mask[None, ...]
        self.register_buffer("causal_attention_mask", causal_attention_mask, persistent=False)

        self.clip_mean = nn.Parameter(torch.zeros(1, clip_embed_dim))
        self.clip_std = nn.Parameter(torch.zeros(1, clip_embed_dim))
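
    # Editor's illustration (not in the original source): the registered buffer is an additive
    # causal mask. For a sequence length of 3, torch.full([3, 3], -10000.0).triu_(1) yields
    #     [[    0., -10000., -10000.],
    #      [    0.,      0., -10000.],
    #      [    0.,      0.,      0.]]
    # so adding it to the attention scores blanks out future positions before the softmax.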
    @property
    # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors(self) -> Dict[str, AttentionProcessor]:
        r"""
        Returns:
            `dict` of attention processors: all attention processors used in the model, indexed by weight name.
        """
        # set recursively
        processors = {}

        def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
            if hasattr(module, "set_processor"):
                processors[f"{name}.processor"] = module.processor

            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)

            return processors

        for name, module in self.named_children():
            fn_recursive_add_processors(name, module, processors)

        return processors

    def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
        r"""Sets the attention processor(s) to use when computing attention."""
        count = len(self.attn_processors.keys())

        if isinstance(processor, dict) and len(processor) != count:
            raise ValueError(
                f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
                f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
            )

        def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
            if hasattr(module, "set_processor"):
                if not isinstance(processor, dict):
                    module.set_processor(processor)
                else:
                    module.set_processor(processor.pop(f"{name}.processor"))

            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)

        for name, module in self.named_children():
            fn_recursive_attn_processor(name, module, processor)

    def set_default_attn_processor(self):
        """Disables custom attention processors and restores the default attention implementation."""
        self.set_attn_processor(AttnProcessor())
    def forward(
        self,
        hidden_states,
        timestep: Union[torch.Tensor, float, int],
        proj_embedding: torch.FloatTensor,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        attention_mask: Optional[torch.BoolTensor] = None,
        return_dict: bool = True,
    ):
        batch_size = hidden_states.shape[0]

        timesteps = timestep
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=hidden_states.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(hidden_states.device)

        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(batch_size, dtype=timesteps.dtype, device=timesteps.device)

        timesteps_projected = self.time_proj(timesteps)

        # timesteps does not contain any weights and will always return f32 tensors
        # but time_embedding might be fp16, so we need to cast here.
        timesteps_projected = timesteps_projected.to(dtype=self.dtype)
        time_embeddings = self.time_embedding(timesteps_projected)

        if self.embedding_proj_norm is not None:
            proj_embedding = self.embedding_proj_norm(proj_embedding)

        proj_embeddings = self.embedding_proj(proj_embedding)
        if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
            encoder_hidden_states = self.encoder_hidden_states_proj(encoder_hidden_states)
        elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
            raise ValueError("`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set")

        hidden_states = self.proj_in(hidden_states)

        positional_embeddings = self.positional_embedding.to(hidden_states.dtype)

        additional_embeds = []
        additional_embeddings_len = 0

        if encoder_hidden_states is not None:
            additional_embeds.append(encoder_hidden_states)
            additional_embeddings_len += encoder_hidden_states.shape[1]

        if len(proj_embeddings.shape) == 2:
            proj_embeddings = proj_embeddings[:, None, :]

        if len(hidden_states.shape) == 2:
            hidden_states = hidden_states[:, None, :]

        additional_embeds = additional_embeds + [
            proj_embeddings,
            time_embeddings[:, None, :],
            hidden_states,
        ]

        if self.prd_embedding is not None:
            prd_embedding = self.prd_embedding.to(hidden_states.dtype).expand(batch_size, -1, -1)
            additional_embeds.append(prd_embedding)

        hidden_states = torch.cat(
            additional_embeds,
            dim=1,
        )

        # Allow positional_embedding to not include the additional embeddings and instead pad it with zeros for these additional tokens
        additional_embeddings_len = additional_embeddings_len + proj_embeddings.shape[1] + 1
        if positional_embeddings.shape[1] < hidden_states.shape[1]:
            positional_embeddings = F.pad(
                positional_embeddings,
                (
                    0,
                    0,
                    additional_embeddings_len,
                    self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
                ),
                value=0.0,
            )

        hidden_states = hidden_states + positional_embeddings

        if attention_mask is not None:
            attention_mask = (1 - attention_mask.to(hidden_states.dtype)) * -10000.0
            attention_mask = F.pad(attention_mask, (0, self.additional_embeddings), value=0.0)
            attention_mask = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype)
            attention_mask = attention_mask.repeat_interleave(self.config.num_attention_heads, dim=0)

        if self.norm_in is not None:
            hidden_states = self.norm_in(hidden_states)

        for block in self.transformer_blocks:
            hidden_states = block(hidden_states, attention_mask=attention_mask)

        hidden_states = self.norm_out(hidden_states)

        if self.prd_embedding is not None:
            hidden_states = hidden_states[:, -1]
        else:
            hidden_states = hidden_states[:, additional_embeddings_len:]

        predicted_image_embedding = self.proj_to_clip_embeddings(hidden_states)

        if not return_dict:
            return (predicted_image_embedding,)

        return PriorTransformerOutput(predicted_image_embedding=predicted_image_embedding)

    def post_process_latents(self, prior_latents):
        prior_latents = (prior_latents * self.clip_std) + self.clip_mean
        return prior_latents
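

# Editor's sketch (hypothetical sizes, not part of the original module): exercising the
# prior end-to-end with random inputs, mirroring how unCLIP-style pipelines call it.
def _demo_prior_transformer() -> None:
    model = PriorTransformer(
        num_attention_heads=2, attention_head_dim=4, num_layers=2, embedding_dim=8, num_embeddings=7
    )
    out = model(
        torch.randn(1, 8),  # noisy image embedding
        timestep=1,
        proj_embedding=torch.randn(1, 8),  # pooled CLIP text embedding
        encoder_hidden_states=torch.randn(1, 7, 8),  # per-token text states
    )
    assert out.predicted_image_embedding.shape == (1, 8)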
| 302 | 1 |
"""simple docstring"""
import argparse
import math
import traceback
import dateutil.parser as date_parser
import requests
def extract_time_from_single_job(job):
    """Extract time info from a single job in a GitHub Actions workflow run."""

    job_info = {}

    start = job["started_at"]
    end = job["completed_at"]

    start_datetime = date_parser.parse(start)
    end_datetime = date_parser.parse(end)

    duration_in_min = round((end_datetime - start_datetime).total_seconds() / 60.0)

    job_info["started_at"] = start
    job_info["completed_at"] = end
    job_info["duration"] = duration_in_min

    return job_info


def get_job_time(workflow_run_id, token=None):
    """Extract time info for all jobs in a GitHub Actions workflow run."""

    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_time = {}

    try:
        job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})

        return job_time
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}
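

# Editor's note (worked example): the loop above pages through the REST API 100 jobs at a
# time; `total_count` says how many follow-up pages remain. A run with 250 jobs needs
# math.ceil((250 - 100) / 100) == 2 extra requests (pages 2 and 3) on top of the first call.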
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    args = parser.parse_args()

    job_time = get_job_time(args.workflow_run_id)
    job_time = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))

    for k, v in job_time.items():
        print(f"{k}: {v['duration']}")
| 367 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_flip_channel_order=True,
    ):
        size = size if size is not None else {"shortest_edge": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_flip_channel_order": self.do_flip_channel_order,
        }
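

# Editor's note (background, not in the original tests): `do_flip_channel_order` exists
# because the original MobileViT checkpoints consume BGR rather than RGB input, so the
# processor swaps the channel axis (the equivalent of np.flip(arr, axis=0) on a CHW array).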
@require_torch
@require_vision
class MobileViTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = MobileViTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = MobileViTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_flip_channel_order"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
| 309 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sayakpaul/vit-msn-base": "https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json",
    # See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}


class ViTMSNConfig(PretrainedConfig):
    model_type = "vit_msn"

    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-06, image_size=224, patch_size=16, num_channels=3, qkv_bias=True, **kwargs):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
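

# Editor's example (illustrative): constructing and consuming the configuration above.
#
#     from transformers import ViTMSNConfig, ViTMSNModel
#     config = ViTMSNConfig(image_size=224, patch_size=16)
#     model = ViTMSNModel(config)  # randomly initialized ViT-MSN backbone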
| 80 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
logger = logging.get_logger(__name__)
if is_vision_available():
import PIL
class CLIPImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BICUBIC, do_center_crop: bool = True, crop_size: Dict[str, int] = None, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, do_convert_rgb: bool = True, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(self, images: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: int = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, do_convert_rgb: bool = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Optional[ChannelDimension] = ChannelDimension.FIRST, **kwargs) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
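

# Editor's sketch (assumes a local RGB image; defaults as defined above): a typical call.
#
#     processor = CLIPImageProcessor()
#     batch = processor(images=Image.open("cat.png"), return_tensors="pt")
#     print(batch["pixel_values"].shape)  # torch.Size([1, 3, 224, 224]) with the defaults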
| 80 | 1 |
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LongformerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LongformerTokenizer
    test_slow_tokenizer = True
    rust_tokenizer_class = LongformerTokenizerFast
    test_rust_tokenizer = True

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
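
    # Editor's note (worked example, not in the original tests): with the toy merges above,
    # "lower" tokenizes to ["l", "o", "w", "er"] because "e r" is the only merge that applies
    # inside the word; "\u0120" is the byte-level marker for a leading space.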
    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)  # , add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def longformer_dict_integration_testing(self):
        tokenizer = self.get_tokenizer()

        self.assertListEqual(tokenizer.encode("Hello world!", add_special_tokens=False), [0, 31414, 232, 328, 2])
        self.assertListEqual(
            tokenizer.encode("Hello world! cécé herlolip 418", add_special_tokens=False),
            [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2],
        )
    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("allenai/longformer-base-4096")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_text_from_decode = tokenizer.encode(
            "sequence builders", add_special_tokens=True, add_prefix_space=False
        )
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False
        )

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode
    def test_space_encoding(self):
        tokenizer = self.get_tokenizer()

        sequence = "Encode this sequence."
        space_encoding = tokenizer.byte_encoder[" ".encode("utf-8")[0]]

        # Testing encoder arguments
        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=False)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertNotEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertEqual(first_char, space_encoding)

        tokenizer.add_special_tokens({"bos_token": "<s>"})
        encoded = tokenizer.encode(sequence, add_special_tokens=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[1])[0]
        self.assertNotEqual(first_char, space_encoding)

        # Testing spaces after special tokens
        mask = "<mask>"
        tokenizer.add_special_tokens(
            {"mask_token": AddedToken(mask, lstrip=True, rstrip=False)}
        )  # mask token has a left space
        mask_ind = tokenizer.convert_tokens_to_ids(mask)

        sequence = "Encode <mask> sequence"
        sequence_nospace = "Encode <mask>sequence"

        encoded = tokenizer.encode(sequence)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence_nospace)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertNotEqual(first_char, space_encoding)

    def test_pretokenized_inputs(self):
        pass
    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))

                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])

                # Rust correctly handles the space before the mask while python doesnt
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])

                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
    def test_change_add_prefix_space_and_trim_offsets_args(self):
        for trim_offsets, add_prefix_space in itertools.product([True, False], repeat=2):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                self.tmpdirname, use_fast=True, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets
            )

            pre_tokenizer_state = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__())
            post_processor_state = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__())

            self.assertEqual(pre_tokenizer_state["add_prefix_space"], add_prefix_space)

            self.assertEqual(post_processor_state["add_prefix_space"], add_prefix_space)
            self.assertEqual(post_processor_state["trim_offsets"], trim_offsets)
    def test_offsets_mapping_with_different_add_prefix_space_and_trim_space_arguments(self):
        # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
        # `trim_offsets`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                text_of_1_token = "hello"  # `hello` is a token in the vocabulary of `pretrained_name`
                text = f"{text_of_1_token} {text_of_1_token}"

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                text = f" {text}"

                # tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                #     pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                # )
                # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                # self.assertEqual(
                #     encoding.offset_mapping[1],
                #     (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                # )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )
| 365 |
import inspect
import jax
import jax.lax as lax
import jax.numpy as jnp
from ..utils import add_start_docstrings
from ..utils.logging import get_logger
logger = get_logger(__name__)

LOGITS_PROCESSOR_INPUTS_DOCSTRING = r'''
Args:
input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):
Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam
search or log softmax for each vocabulary token when using beam search
kwargs (`Dict[str, Any]`, *optional*):
Additional logits processor specific kwargs.
Return:
`jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.
'''
class FlaxLogitsProcessor:
    """Abstract base class for all logit processors that can be applied during generation."""

    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray) -> jnp.ndarray:
        """Flax method for processing logits."""
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )


class FlaxLogitsWarper:
    """Abstract base class for all logit warpers applied during generation with multinomial sampling."""

    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray) -> jnp.ndarray:
        """Flax method for warping logits."""
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )


class FlaxLogitsProcessorList(list):
    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int, **kwargs) -> jnp.ndarray:
        for processor in self:
            function_args = inspect.signature(processor.__call__).parameters
            if len(function_args) > 3:
                if not all(arg in kwargs for arg in list(function_args.keys())[2:]):
                    raise ValueError(
                        f"Make sure that all the required parameters: {list(function_args.keys())} for "
                        f"{processor.__class__} are passed to the logits processor."
                    )
                scores = processor(input_ids, scores, cur_len, **kwargs)
            else:
                scores = processor(input_ids, scores, cur_len)
        return scores
class FlaxTemperatureLogitsWarper(FlaxLogitsWarper):
    def __init__(self, temperature: float):
        if not isinstance(temperature, float) or not (temperature > 0):
            raise ValueError(f"`temperature` has to be a strictly positive float, but is {temperature}")

        self.temperature = temperature

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        scores = scores / self.temperature
        return scores
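

# Editor's demo (illustrative, not part of the original module): temperature only rescales
# the logits, so the argmax is unchanged while the softmax distribution gets flatter.
def _demo_temperature() -> None:
    scores = jnp.array([[1.0, 2.0, 4.0]])
    warped = FlaxTemperatureLogitsWarper(2.0)(None, scores, cur_len=1)
    assert jnp.allclose(warped, jnp.array([[0.5, 1.0, 2.0]]))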
class FlaxTopPLogitsWarper(FlaxLogitsWarper):
    def __init__(self, top_p: float, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
        if not isinstance(top_p, float) or (top_p < 0 or top_p > 1.0):
            raise ValueError(f"`top_p` has to be a float > 0 and < 1, but is {top_p}")
        if not isinstance(min_tokens_to_keep, int) or (min_tokens_to_keep < 1):
            raise ValueError(f"`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}")

        self.top_p = top_p
        self.filter_value = filter_value
        self.min_tokens_to_keep = min_tokens_to_keep

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        topk_scores, topk_indices = lax.top_k(scores, scores.shape[-1])

        mask_scores = jnp.full_like(scores, self.filter_value)
        cumulative_probs = jax.nn.softmax(topk_scores, axis=-1).cumsum(axis=-1)
        score_mask = cumulative_probs < self.top_p

        # include the token that is higher than top_p as well
        score_mask = jnp.roll(score_mask, 1)
        score_mask |= score_mask.at[:, 0].set(True)

        # min tokens to keep
        score_mask = score_mask.at[:, : self.min_tokens_to_keep].set(True)

        topk_next_scores = jnp.where(score_mask, topk_scores, mask_scores)
        next_scores = jax.lax.sort_key_val(topk_indices, topk_next_scores)[-1]

        return next_scores


class FlaxTopKLogitsWarper(FlaxLogitsWarper):
    def __init__(self, top_k: int, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
        if not isinstance(top_k, int) or top_k <= 0:
            raise ValueError(f"`top_k` has to be a strictly positive integer, but is {top_k}")

        self.top_k = max(top_k, min_tokens_to_keep)
        self.filter_value = filter_value

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        batch_size, vocab_size = scores.shape
        next_scores_flat = jnp.full(batch_size * vocab_size, self.filter_value)

        topk = min(self.top_k, scores.shape[-1])  # Safety check
        topk_scores, topk_indices = lax.top_k(scores, topk)
        shift = jnp.broadcast_to((jnp.arange(batch_size) * vocab_size)[:, None], (batch_size, topk)).flatten()
        topk_scores_flat = topk_scores.flatten()
        topk_indices_flat = topk_indices.flatten() + shift

        next_scores_flat = next_scores_flat.at[topk_indices_flat].set(topk_scores_flat)
        next_scores = next_scores_flat.reshape(batch_size, vocab_size)
        return next_scores
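

# Editor's demo (illustrative): top-k keeps the k best logits and pushes the rest to -inf.
def _demo_top_k() -> None:
    scores = jnp.array([[0.1, 0.4, 0.3, 0.2]])
    warped = FlaxTopKLogitsWarper(top_k=2)(None, scores, cur_len=1)
    assert bool(jnp.isneginf(warped[0, 0])) and bool(jnp.isneginf(warped[0, 3]))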
class snake_case_ (lowerCamelCase_ ):
def __init__( self :int ,__snake_case :int ) -> str:
a__ = bos_token_id
def __call__( self :List[Any] ,__snake_case :jnp.ndarray ,__snake_case :jnp.ndarray ,__snake_case :int ) -> jnp.ndarray:
a__ = jnp.full(scores.shape ,-float('inf' ) )
a__ = 1 - jnp.bool_(cur_len - 1 )
a__ = jnp.where(__snake_case ,new_scores.at[:, self.bos_token_id].set(0 ) ,__snake_case )
return scores
class snake_case_ (lowerCamelCase_ ):
def __init__( self :Union[str, Any] ,__snake_case :int ,__snake_case :int ) -> List[Any]:
a__ = max_length
a__ = eos_token_id
def __call__( self :int ,__snake_case :jnp.ndarray ,__snake_case :jnp.ndarray ,__snake_case :int ) -> jnp.ndarray:
a__ = jnp.full(scores.shape ,-float('inf' ) )
a__ = 1 - jnp.bool_(cur_len - self.max_length + 1 )
a__ = jnp.where(__snake_case ,new_scores.at[:, self.eos_token_id].set(0 ) ,__snake_case )
return scores
class FlaxMinLengthLogitsProcessor(FlaxLogitsProcessor):
    def __init__(self, min_length: int, eos_token_id: int):
        if not isinstance(min_length, int) or min_length < 0:
            raise ValueError(f"`min_length` has to be a positive integer, but is {min_length}")
        if not isinstance(eos_token_id, int) or eos_token_id < 0:
            raise ValueError(f"`eos_token_id` has to be a positive integer, but is {eos_token_id}")

        self.min_length = min_length
        self.eos_token_id = eos_token_id

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        # create boolean flag to decide if min length penalty should be applied
        apply_penalty = 1 - jnp.clip(cur_len - self.min_length, 0, 1)
        scores = jnp.where(apply_penalty, scores.at[:, self.eos_token_id].set(-float("inf")), scores)
        return scores


class FlaxSuppressTokensAtBeginLogitsProcessor(FlaxLogitsProcessor):
    def __init__(self, begin_suppress_tokens, begin_index):
        self.begin_suppress_tokens = list(begin_suppress_tokens)
        self.begin_index = begin_index

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        apply_penalty = 1 - jnp.bool_(cur_len - self.begin_index)
        scores = jnp.where(apply_penalty, scores.at[:, self.begin_suppress_tokens].set(-float("inf")), scores)
        return scores


class FlaxSuppressTokensLogitsProcessor(FlaxLogitsProcessor):
    def __init__(self, suppress_tokens: list):
        self.suppress_tokens = list(suppress_tokens)

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        scores = scores.at[..., self.suppress_tokens].set(-float("inf"))
        return scores
class FlaxForceTokensLogitsProcessor(FlaxLogitsProcessor):
    def __init__(self, force_token_map):
        force_token_map = dict(force_token_map)
        # Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
        # index of the array corresponds to the index of the token to be forced, for XLA compatibility.
        # Indexes without forced tokens will have a negative value.
        force_token_array = jnp.ones((max(force_token_map.keys()) + 1), dtype=jnp.int32) * -1
        for index, token in force_token_map.items():
            if token is not None:
                force_token_array = force_token_array.at[index].set(token)
        self.force_token_array = jnp.int32(force_token_array)

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        def _force_token(generation_idx):
            batch_size = scores.shape[0]
            current_token = self.force_token_array[generation_idx]

            new_scores = jnp.ones_like(scores, dtype=scores.dtype) * -float("inf")
            updates = jnp.zeros((batch_size, 1), dtype=scores.dtype)
            new_scores = lax.dynamic_update_slice(new_scores, updates, (0, current_token))
            return new_scores

        scores = lax.cond(
            cur_len >= self.force_token_array.shape[0],
            # If the current length is geq than the length of force_token_array, the processor does nothing.
            lambda: scores,
            # Otherwise, it may force a certain token.
            lambda: lax.cond(
                self.force_token_array[cur_len] >= 0,
                # Only valid (positive) tokens are forced
                lambda: _force_token(cur_len),
                # Otherwise, the processor does nothing.
                lambda: scores,
            ),
        )
        return scores
class FlaxWhisperTimeStampLogitsProcessor(FlaxLogitsProcessor):
    def __init__(self, generate_config, model_config, decoder_input_length):
        self.eos_token_id = generate_config.eos_token_id
        self.no_timestamps_token_id = generate_config.no_timestamps_token_id
        self.timestamp_begin = generate_config.no_timestamps_token_id + 1

        self.begin_index = decoder_input_length + 1

        if generate_config.is_multilingual:
            # room for language token and task token
            self.begin_index += 2
        if hasattr(generate_config, "max_initial_timestamp_index"):
            self.max_initial_timestamp_index = generate_config.max_initial_timestamp_index
        else:
            self.max_initial_timestamp_index = model_config.vocab_size
        if self.max_initial_timestamp_index is None:
            self.max_initial_timestamp_index = model_config.vocab_size

    def __call__(self, input_ids, scores, cur_len):
        # suppress <|notimestamps|> which is handled by without_timestamps
        scores = scores.at[:, self.no_timestamps_token_id].set(-float("inf"))

        def handle_pairs(input_ids_k, scores_k):
            last_was_timestamp = jnp.where((cur_len - self.begin_index) >= 1, True, False)
            last_was_timestamp = jnp.where(
                input_ids_k[cur_len - 1] >= self.timestamp_begin,
                True and last_was_timestamp,
                False,
            )

            penultimate_was_timestamp = jnp.where((cur_len - self.begin_index) < 2, True, False)
            penultimate_was_timestamp = jnp.where(
                input_ids_k[cur_len - 2] >= self.timestamp_begin,
                True,
                penultimate_was_timestamp,
            )

            return jnp.where(
                last_was_timestamp,
                jnp.where(
                    penultimate_was_timestamp > 0,
                    scores_k.at[self.timestamp_begin :].set(-float("inf")),
                    scores_k.at[: self.eos_token_id].set(-float("inf")),
                ),
                scores_k,
            )

        scores = jax.vmap(handle_pairs)(input_ids, scores)

        apply_max_initial_timestamp = jnp.where(cur_len == self.begin_index, True, False)
        apply_max_initial_timestamp = jnp.where(
            self.max_initial_timestamp_index is not None,
            True and apply_max_initial_timestamp,
            False,
        )

        last_allowed = self.timestamp_begin + self.max_initial_timestamp_index

        scores = jnp.where(
            apply_max_initial_timestamp,
            scores.at[:, last_allowed + 1 :].set(-float("inf")),
            scores,
        )

        # if sum of probability over timestamps is above any other token, sample timestamp
        logprobs = jax.nn.log_softmax(scores, axis=-1)

        def handle_cumulative_probs(logprobs_k, scores_k):
            timestamp_logprob = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :], axis=-1)
            max_text_token_logprob = jnp.max(logprobs_k[: self.timestamp_begin])
            return jnp.where(
                timestamp_logprob > max_text_token_logprob,
                scores_k.at[: self.timestamp_begin].set(-float("inf")),
                scores_k,
            )

        scores = jax.vmap(handle_cumulative_probs)(logprobs, scores)

        return scores
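# Added usage sketch (not part of the original file): how these warpers compose,
# assuming the surrounding file defines transformers' FlaxLogitsProcessorList
# (whose __call__ body appears near the top of this file). The numbers are made up.
if __name__ == "__main__":
    processors = FlaxLogitsProcessorList(
        [FlaxTemperatureLogitsWarper(0.7), FlaxTopKLogitsWarper(top_k=2)]
    )
    dummy_input_ids = jnp.zeros((1, 1), dtype=jnp.int32)
    dummy_scores = jnp.array([[0.1, 0.5, 0.2, 0.9]])
    warped = processors(dummy_input_ids, dummy_scores, cur_len=1)
    # everything outside the top-2 tokens is set to the filter value (-inf)
    print(warped)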
| 109 | 0 |
def climb_stairs(number_of_steps: int) -> int:
    """
    Count the distinct ways to climb a staircase of `number_of_steps` steps,
    taking one or two steps at a time.

    >>> climb_stairs(3)
    3
    >>> climb_stairs(1)
    1
    """
    assert (
        isinstance(number_of_steps, int) and number_of_steps > 0
    ), f"number_of_steps needs to be positive integer, your input {number_of_steps}"
    if number_of_steps == 1:
        return 1
    previous, current = 1, 1
    for _ in range(number_of_steps - 1):
        current, previous = current + previous, current
    return current
if __name__ == "__main__":
import doctest
doctest.testmod()
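    # Added sanity check: climb_stairs follows the Fibonacci recurrence
    # ways(n) = ways(n - 1) + ways(n - 2), so the first values are 1, 2, 3, 5, 8.
    print([climb_stairs(n) for n in range(1, 6)])  # [1, 2, 3, 5, 8]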
| 219 | def binary_xor(a: int, b: int) -> str:
    """
    Return the bitwise XOR of two non-negative integers as a binary string.

    >>> binary_xor(25, 32)
    '0b111001'
    """
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")

    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"

    max_len = max(len(a_binary), len(b_binary))

    return "0b" + "".join(
        str(int(char_a != char_b))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
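    # Added cross-check: the string result should always agree with Python's
    # built-in integer XOR for non-negative inputs.
    assert binary_xor(25, 32) == bin(25 ^ 32)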
| 219 | 1 |
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
_has_s3fs = importlib.util.find_spec("s3fs") is not None

if _has_s3fs:
    from .s3filesystem import S3FileSystem  # noqa: F401

COMPRESSION_FILESYSTEMS: List[compression.BaseCompressedFileFileSystem] = [
    compression.Bz2FileSystem,
    compression.GzipFileSystem,
    compression.Lz4FileSystem,
    compression.XzFileSystem,
    compression.ZstdFileSystem,
]

# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
    if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
        warnings.warn(f"A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.")
    fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)


def extract_path_from_uri(dataset_path: str) -> str:
    """Strip the protocol prefix (e.g. "s3://") from a dataset path, if present."""
    if "://" in dataset_path:
        dataset_path = dataset_path.split("://")[1]
    return dataset_path


def is_remote_filesystem(fs) -> bool:
    """Check whether `fs` points to a remote (non-local) filesystem."""
    if fs is not None and fs.protocol != "file":
        return True
    else:
        return False


def rename(fs, src: str, dst: str):
    """Rename `src` to `dst`; local directories are moved directly for efficiency."""
    is_local = not is_remote_filesystem(fs)
    if is_local:
        # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
        shutil.move(fs._strip_protocol(src), fs._strip_protocol(dst))
    else:
        fs.mv(src, dst, recursive=True)


def _reset_fsspec_lock() -> None:
    """Clear fsspec's references to its event loop and IO thread, which can
    otherwise hang in training loops."""
    if hasattr(fsspec.asyn, "reset_lock"):
        # for future fsspec>2022.05.0
        fsspec.asyn.reset_lock()
    else:
        fsspec.asyn.iothread[0] = None
        fsspec.asyn.loop[0] = None
        fsspec.asyn.lock = threading.Lock()
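# Added example (not in the original module): how the path helpers behave.
# Note that `is_remote_filesystem` is shown with a print rather than an assert,
# since LocalFileSystem.protocol is a tuple on newer fsspec versions.
if __name__ == "__main__":
    assert extract_path_from_uri("s3://my-bucket/datasets/squad") == "my-bucket/datasets/squad"
    assert extract_path_from_uri("/local/path/datasets/squad") == "/local/path/datasets/squad"
    local_fs = fsspec.filesystem("file")
    print(is_remote_filesystem(local_fs))  # expected False for a local filesystem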
| 47 |
from collections import defaultdict
def check_anagrams(first_str: str, second_str: str) -> bool:
    """
    Two strings are anagrams if they are made of the same letters arranged
    differently (ignoring case and whitespace).

    >>> check_anagrams('Silent', 'Listen')
    True
    """
    first_str = first_str.lower().strip()
    second_str = second_str.lower().strip()

    # Remove whitespace
    first_str = first_str.replace(" ", "")
    second_str = second_str.replace(" ", "")

    # Strings of different lengths are not anagrams
    if len(first_str) != len(second_str):
        return False

    # Default values for count should be 0
    count: defaultdict = defaultdict(int)

    # For each character in the input strings,
    # increment/decrement the count in the corresponding slot
    for i in range(len(first_str)):
        count[first_str[i]] += 1
        count[second_str[i]] -= 1

    return all(_count == 0 for _count in count.values())


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    input_a = input("Enter the first string ").strip()
    input_b = input("Enter the second string ").strip()

    status = check_anagrams(input_a, input_b)
    print(f"{input_a} and {input_b} are {'' if status else 'not '}anagrams.")
| 47 | 1 |
'''simple docstring'''
# flake8: noqa
# Lint as: python3
__all__ = [
"VerificationMode",
"Version",
"disable_progress_bar",
"enable_progress_bar",
"is_progress_bar_enabled",
"experimental",
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
| 151 |
"""Tokenization class for the Reformer model."""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/reformer-crime-and-punishment": (
            "https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model"
        )
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/reformer-crime-and-punishment": 524288,
}


class ReformerTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        eos_token="</s>",
        unk_token="<unk>",
        additional_special_tokens=[],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self) -> int:
        return self.sp_model.get_piece_size()

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        """Take as input a string and return a list of strings (tokens) for words/sub-words."""
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) into an id using the vocab."""
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) into a token (str) using the vocab."""
        if index < self.sp_model.get_piece_size():
            token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings) into a single string."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
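# Added usage sketch (not part of the original file); downloading the pretrained
# vocab requires network access, so this is guarded and illustrative only.
if __name__ == "__main__":
    tok = ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")
    print(tok.tokenize("This is a test"))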
| 151 | 1 |
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")

mock_tokenizer_config = {"target_lang": "fi", "source_lang": "en"}
zh_code = ">>zh<<"
ORG_NAME = "Helsinki-NLP/"

if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
@require_sentencepiece
class MarianTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MarianTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        vocab = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab"])
        save_json(mock_tokenizer_config, save_dir / VOCAB_FILES_NAMES["tokenizer_config_file"])
        if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
            copyfile(SAMPLE_VOCAB, save_dir / VOCAB_FILES_NAMES["source_spm"])
            copyfile(SAMPLE_VOCAB, save_dir / VOCAB_FILES_NAMES["target_spm"])

        tokenizer = MarianTokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs) -> MarianTokenizer:
        return MarianTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return (
            "This is a test",
            "This is a test",
        )
    def test_convert_token_and_id(self):
        token = "</s>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "</s>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "<pad>")
        self.assertEqual(len(vocab_keys), 9)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 9)

    def test_tokenizer_equivalence_en_de(self):
        en_de_tokenizer = MarianTokenizer.from_pretrained(f"{ORG_NAME}opus-mt-en-de")
        batch = en_de_tokenizer(["I am a small frog"], return_tensors=None)
        self.assertIsInstance(batch, BatchEncoding)
        expected = [38, 121, 14, 697, 38848, 0]
        self.assertListEqual(expected, batch.input_ids[0])

        save_dir = tempfile.mkdtemp()
        en_de_tokenizer.save_pretrained(save_dir)
        contents = [x.name for x in Path(save_dir).glob("*")]
        self.assertIn("source.spm", contents)
        MarianTokenizer.from_pretrained(save_dir)

    def test_outputs_not_longer_than_maxlen(self):
        tok = self.get_tokenizer()

        batch = tok(
            ["I am a small frog" * 1000, "I am a small frog"], padding=True, truncation=True, return_tensors=FRAMEWORK
        )
        self.assertIsInstance(batch, BatchEncoding)
        self.assertEqual(batch.input_ids.shape, (2, 512))

    def test_outputs_can_be_shorter(self):
        tok = self.get_tokenizer()
        batch_smaller = tok(["I am a tiny frog", "I am a small frog"], padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch_smaller, BatchEncoding)
        self.assertEqual(batch_smaller.input_ids.shape, (2, 10))
@slow
    def test_tokenizer_integration(self):
# fmt: off
UpperCamelCase_: Tuple = {'input_ids': [[4_3_4_9_5, 4_6_2, 2_0, 4_2_1_6_4, 1_3_6_9, 5_2, 4_6_4, 1_3_2, 1_7_0_3, 4_9_2, 1_3, 7_4_9_1, 3_8_9_9_9, 6, 8, 4_6_4, 1_3_2, 1_7_0_3, 4_9_2, 1_3, 4_6_6_9, 3_7_8_6_7, 1_3, 7_5_2_5, 2_7, 1_5_9_3, 9_8_8, 1_3, 3_3_9_7_2, 7_0_2_9, 6, 2_0, 8_2_5_1, 3_8_3, 2, 2_7_0, 5_8_6_6, 3_7_8_8, 2, 2_3_5_3, 8_2_5_1, 1_2_3_3_8, 2, 1_3_9_5_8, 3_8_7, 2, 3_6_2_9, 6_9_5_3, 1_8_8, 2_9_0_0, 2, 1_3_9_5_8, 8_0_1_1, 1_1_5_0_1, 2_3, 8_4_6_0, 4_0_7_3, 3_4_0_0_9, 2_0, 4_3_5, 1_1_4_3_9, 2_7, 8, 8_4_6_0, 4_0_7_3, 6_0_0_4, 2_0, 9_9_8_8, 3_7_5, 2_7, 3_3, 2_6_6, 1_9_4_5, 1_0_7_6, 1_3_5_0, 3_7_8_6_7, 3_2_8_8, 5, 5_7_7, 1_0_7_6, 4_3_7_4, 8, 5_0_8_2, 5, 2_6_4_5_3, 2_5_7, 5_5_6, 4_0_3, 2, 2_4_2, 1_3_2, 3_8_3, 3_1_6, 4_9_2, 8, 1_0_7_6_7, 6, 3_1_6, 3_0_4, 4_2_3_9, 3, 0], [1_4_8, 1_5_7_2_2, 1_9, 1_8_3_9, 1_2, 1_3_5_0, 1_3, 2_2_3_2_7, 5_0_8_2, 5_4_1_8, 4_7_5_6_7, 3_5_9_3_8, 5_9, 3_1_8, 1_9_5_5_2, 1_0_8, 2_1_8_3, 5_4, 1_4_9_7_6, 4_8_3_5, 3_2, 5_4_7, 1_1_1_4, 8, 3_1_5, 2_4_1_7, 5, 9_2, 1_9_0_8_8, 3, 0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0], [3_6, 6_3_9_5, 1_2_5_7_0, 3_9_1_4_7, 1_1_5_9_7, 6, 2_6_6, 4, 4_5_4_0_5, 7_2_9_6, 3, 0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=UpperCamelCase_,
            model_name="Helsinki-NLP/opus-mt-en-de",
            revision="1a8c2263da11e68e50938f97e10cd57820bd504c",
            decode_kwargs={"use_source_tokenizer": True},
        )
    def test_tokenizer_integration_separate_vocabs(self):
        tokenizer = MarianTokenizer.from_pretrained("hf-internal-testing/test-marian-two-vocabs")

        source_text = "Tämä on testi"
        target_text = "This is a test"

        expected_src_ids = [76, 7, 2047, 2]
        expected_target_ids = [69, 12, 11, 940, 2]

        src_ids = tokenizer(source_text).input_ids
        self.assertListEqual(src_ids, expected_src_ids)

        target_ids = tokenizer(text_target=target_text).input_ids
        self.assertListEqual(target_ids, expected_target_ids)

        decoded = tokenizer.decode(target_ids, skip_special_tokens=True)
        self.assertEqual(decoded, target_text) | 355 |
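# Added sketch (not in the original test file): the padding/truncation behaviour the
# tests above assert, shown as plain tokenizer calls. Model download needs network.
#
#     tok = MarianTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-de")
#     batch = tok(["I am a small frog" * 1000], padding=True, truncation=True, return_tensors="pt")
#     assert batch.input_ids.shape[-1] == 512  # inputs are capped at the model max length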
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class EfficientFormerImageProcessor(BaseImageProcessor):
    # NOTE: the original class name was mangled in this copy; the constants
    # (IMAGENET_DEFAULT stats, 224 crop, BICUBIC resampling) match the
    # EfficientFormer image processor, so that name is used here.
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        crop_size: Optional[Dict[str, int]] = None,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.size = size
        self.resample = resample
        self.rescale_factor = rescale_factor
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)

        if "shortest_edge" in size:
            size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
            # size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
        elif "height" in size and "width" in size:
            size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must contain 'height' and 'width' keys or 'shortest_edge' key. Got {size.keys()}")
        return resize(image, size=size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: Optional[PILImageResampling] = None,
        do_center_crop: Optional[bool] = None,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        resample = resample if resample is not None else self.resample
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size_dict = get_size_dict(size)

        if not is_batched(images):
            images = [images]

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size_dict, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors) | 292 | 0 |
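# Added example (not in the original file): running the processor on a random image.
# The class name above is a reconstruction; any BaseImageProcessor subclass with a
# `preprocess` method behaves the same way via __call__.
if __name__ == "__main__":
    rng_image = np.random.randint(0, 256, size=(300, 400, 3), dtype=np.uint8)
    image_processor = EfficientFormerImageProcessor()
    batch = image_processor(rng_image, return_tensors="np")
    print(batch["pixel_values"].shape)  # (1, 3, 224, 224) after resize + center crop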
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/encodec_24khz": "https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json",
    "facebook/encodec_48khz": "https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json",
}


class EncodecConfig(PretrainedConfig):
    model_type = "encodec"

    def __init__(
        self,
        target_bandwidths=[1.5, 3.0, 6.0, 12.0, 24.0],
        sampling_rate=24_000,
        audio_channels=1,
        normalize=False,
        chunk_length_s=None,
        overlap=None,
        hidden_size=128,
        num_filters=32,
        num_residual_layers=1,
        upsampling_ratios=[8, 5, 4, 2],
        norm_type="weight_norm",
        kernel_size=7,
        last_kernel_size=7,
        residual_kernel_size=3,
        dilation_growth_rate=2,
        use_causal_conv=True,
        pad_mode="reflect",
        compress=2,
        num_lstm_layers=2,
        trim_right_ratio=1.0,
        codebook_size=1024,
        codebook_dim=None,
        use_conv_shortcut=True,
        **kwargs,
    ):
        self.target_bandwidths = target_bandwidths
        self.sampling_rate = sampling_rate
        self.audio_channels = audio_channels
        self.normalize = normalize
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap
        self.hidden_size = hidden_size
        self.num_filters = num_filters
        self.num_residual_layers = num_residual_layers
        self.upsampling_ratios = upsampling_ratios
        self.norm_type = norm_type
        self.kernel_size = kernel_size
        self.last_kernel_size = last_kernel_size
        self.residual_kernel_size = residual_kernel_size
        self.dilation_growth_rate = dilation_growth_rate
        self.use_causal_conv = use_causal_conv
        self.pad_mode = pad_mode
        self.compress = compress
        self.num_lstm_layers = num_lstm_layers
        self.trim_right_ratio = trim_right_ratio
        self.codebook_size = codebook_size
        self.codebook_dim = codebook_dim if codebook_dim is not None else hidden_size
        self.use_conv_shortcut = use_conv_shortcut

        if self.norm_type not in ["weight_norm", "time_group_norm"]:
            raise ValueError(
                f'self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`), got {self.norm_type}'
            )

        super().__init__(**kwargs)

    # This is a property because you might want to change the chunk_length_s on the fly
    @property
    def chunk_length(self) -> Optional[int]:
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate)

    # This is a property because you might want to change the chunk_length_s on the fly
    @property
    def chunk_stride(self) -> Optional[int]:
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1, int((1.0 - self.overlap) * self.chunk_length))

    @property
    def frame_rate(self) -> int:
        hop_length = np.prod(self.upsampling_ratios)
        return math.ceil(self.sampling_rate / hop_length)

    @property
    def num_quantizers(self) -> int:
        return int(1000 * self.target_bandwidths[-1] // (self.frame_rate * 10))
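# Added example: derived properties of the default 24 kHz configuration.
# frame_rate = ceil(24000 / (8 * 5 * 4 * 2)) = ceil(24000 / 320) = 75, and
# num_quantizers = 1000 * 24.0 // (75 * 10) = 32.
if __name__ == "__main__":
    config = EncodecConfig()
    print(config.frame_rate)      # 75
    print(config.num_quantizers)  # 32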
| 182 |
'''simple docstring'''
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
    AutoTokenizer,
    Blip2Config,
    Blip2ForConditionalGeneration,
    Blip2Processor,
    Blip2VisionConfig,
    BlipImageProcessor,
    OPTConfig,
    T5Config,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image():
    url = "https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")

    return image
def create_rename_keys(config):
    rename_keys = []
# fmt: off
# vision encoder
rename_keys.append(("""visual_encoder.cls_token""", """vision_model.embeddings.class_embedding""") )
rename_keys.append(("""visual_encoder.pos_embed""", """vision_model.embeddings.position_embedding""") )
rename_keys.append(("""visual_encoder.patch_embed.proj.weight""", """vision_model.embeddings.patch_embedding.weight""") )
rename_keys.append(("""visual_encoder.patch_embed.proj.bias""", """vision_model.embeddings.patch_embedding.bias""") )
rename_keys.append(("""ln_vision.weight""", """vision_model.post_layernorm.weight""") )
rename_keys.append(("""ln_vision.bias""", """vision_model.post_layernorm.bias""") )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((f'visual_encoder.blocks.{i}.norm1.weight', f'vision_model.encoder.layers.{i}.layer_norm1.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.norm1.bias', f'vision_model.encoder.layers.{i}.layer_norm1.bias') )
rename_keys.append((f'visual_encoder.blocks.{i}.norm2.weight', f'vision_model.encoder.layers.{i}.layer_norm2.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.norm2.bias', f'vision_model.encoder.layers.{i}.layer_norm2.bias') )
rename_keys.append((f'visual_encoder.blocks.{i}.attn.qkv.weight', f'vision_model.encoder.layers.{i}.self_attn.qkv.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.attn.proj.weight', f'vision_model.encoder.layers.{i}.self_attn.projection.weight',) )
rename_keys.append((f'visual_encoder.blocks.{i}.attn.proj.bias', f'vision_model.encoder.layers.{i}.self_attn.projection.bias') )
rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc1.weight', f'vision_model.encoder.layers.{i}.mlp.fc1.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc1.bias', f'vision_model.encoder.layers.{i}.mlp.fc1.bias') )
rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc2.weight', f'vision_model.encoder.layers.{i}.mlp.fc2.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc2.bias', f'vision_model.encoder.layers.{i}.mlp.fc2.bias') )
# QFormer
rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.weight""", """qformer.layernorm.weight""") )
rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.bias""", """qformer.layernorm.bias""") )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_q_v_bias(state_dict, config):
    for i in range(config.vision_config.num_hidden_layers):
        # read in original q and v biases
        q_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.v_bias")

        # next, set bias in the state dict
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias))
        state_dict[f"visual_encoder.blocks.{i}.attn.qkv.bias"] = qkv_bias
def get_blip2_config(model_name, eos_token_id):
    image_size = 364 if "coco" in model_name else 224
    vision_config = Blip2VisionConfig(image_size=image_size).to_dict()

    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "opt-2.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-2.7b", eos_token_id=eos_token_id).to_dict()
    elif "opt-6.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-6.7b", eos_token_id=eos_token_id).to_dict()
    elif "t5-xl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    elif "t5-xxl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xxl", dense_act_fn="gelu", bos_token_id=1).to_dict()

    config = Blip2Config(vision_config=vision_config, text_config=text_config)

    return config, image_size
@torch.no_grad()
def convert_blip2_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    """
    Copy/paste/tweak the original model's weights to our Transformers design.
    """
    tokenizer = (
        AutoTokenizer.from_pretrained("facebook/opt-2.7b")
        if "opt" in model_name
        else AutoTokenizer.from_pretrained("google/flan-t5-xl")
    )
    eos_token_id = tokenizer("\n", add_special_tokens=False).input_ids[0]
    config, image_size = get_blip2_config(model_name, eos_token_id=eos_token_id)

    hf_model = Blip2ForConditionalGeneration(config).eval()

    model_name_to_original = {
        "blip2-opt-2.7b": ("blip2_opt", "pretrain_opt2.7b"),
        "blip2-opt-6.7b": ("blip2_opt", "pretrain_opt6.7b"),
        "blip2-opt-2.7b-coco": ("blip2_opt", "caption_coco_opt2.7b"),
        "blip2-opt-6.7b-coco": ("blip2_opt", "caption_coco_opt6.7b"),
        "blip2-flan-t5-xl": ("blip2_t5", "pretrain_flant5xl"),
        "blip2-flan-t5-xl-coco": ("blip2_t5", "caption_coco_flant5xl"),
        "blip2-flan-t5-xxl": ("blip2_t5", "pretrain_flant5xxl"),
    }

    name, type = model_name_to_original[model_name]

    # load original model
    print("Loading original model...")
    device = "cuda" if torch.cuda.is_available() else "cpu"
    original_model, vis_processors, _ = load_model_and_preprocess(
        name=name, model_type=type, is_eval=True, device=device
    )
    original_model.eval()
    print("Done!")

    # update state dict keys
    state_dict = original_model.state_dict()
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)

    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("Qformer.bert"):
            key = key.replace("Qformer.bert", "qformer")
        if "attention.self" in key:
            key = key.replace("self", "attention")
        if "opt_proj" in key:
            key = key.replace("opt_proj", "language_projection")
        if "t5_proj" in key:
            key = key.replace("t5_proj", "language_projection")
        if key.startswith("opt"):
            key = key.replace("opt", "language")
        if key.startswith("t5"):
            key = key.replace("t5", "language")
        state_dict[key] = val

    # read in qv biases
    read_in_q_v_bias(state_dict, config)

    missing_keys, unexpected_keys = hf_model.load_state_dict(state_dict, strict=False)
    assert len(missing_keys) == 0
    assert unexpected_keys == ["qformer.embeddings.position_ids"]

    image = load_demo_image()
    original_pixel_values = vis_processors["eval"](image).unsqueeze(0).to(device)
    input_ids = tokenizer(["\n"], return_tensors="pt").input_ids.to(device)

    # create processor
    image_processor = BlipImageProcessor(
        size={"height": image_size, "width": image_size}, image_mean=OPENAI_CLIP_MEAN, image_std=OPENAI_CLIP_STD
    )
    processor = Blip2Processor(image_processor=image_processor, tokenizer=tokenizer)
    pixel_values = processor(images=image, return_tensors="pt").pixel_values.to(device)

    # make sure processor creates exact same pixel values
    assert torch.allclose(pixel_values, original_pixel_values)

    original_model.to(device)
    hf_model.to(device)
    with torch.no_grad():
        if "opt" in model_name:
            original_logits = original_model({"image": original_pixel_values, "text_input": [""]}).logits
            logits = hf_model(pixel_values, input_ids).logits
        else:
            original_logits = original_model(
                {"image": original_pixel_values, "text_input": ["\n"], "text_output": ["\n"]}
            ).logits
            labels = input_ids.masked_fill(input_ids == tokenizer.pad_token_id, -100)
            logits = hf_model(pixel_values, input_ids, labels=labels).logits

    assert original_logits.shape == logits.shape
    print("First values of original logits:", original_logits[0, :3, :3])
    print("First values of HF logits:", logits[0, :3, :3])

    # assert values
    if model_name == "blip2-flan-t5-xl":
        expected_slice_logits = torch.tensor(
            [[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]], device=device
        )
        assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    elif model_name == "blip2-flan-t5-xl-coco":
        expected_slice_logits = torch.tensor(
            [[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]], device=device
        )
    else:
        # cast to same type
        target_dtype = logits.dtype
        assert torch.allclose(original_logits.to(target_dtype), logits, atol=1e-2)
    print("Looks ok!")

    print("Generating a caption...")
    prompt = ""
    input_ids = tokenizer(prompt, return_tensors="pt").input_ids.to(device)

    original_outputs = original_model.generate({"image": original_pixel_values})
    outputs = hf_model.generate(
        pixel_values,
        input_ids,
        do_sample=False,
        num_beams=5,
        max_length=30,
        min_length=1,
        top_p=0.9,
        repetition_penalty=1.0,
        length_penalty=1.0,
        temperature=1,
    )
    print("Original generation:", original_outputs)
    prompt_length = input_ids.shape[1]
    output_text = processor.batch_decode(outputs[:, prompt_length:], skip_special_tokens=True)
    output_text = [text.strip() for text in output_text]
    print("HF generation:", output_text)

    if pytorch_dump_folder_path is not None:
        processor.save_pretrained(pytorch_dump_folder_path)
        hf_model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        processor.push_to_hub(f"nielsr/{model_name}")
        hf_model.push_to_hub(f"nielsr/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = [
"""blip2-opt-2.7b""",
"""blip2-opt-6.7b""",
"""blip2-opt-2.7b-coco""",
"""blip2-opt-6.7b-coco""",
"""blip2-flan-t5-xl""",
"""blip2-flan-t5-xl-coco""",
"""blip2-flan-t5-xxl""",
]
parser.add_argument(
"""--model_name""",
default="""blip2-opt-2.7b""",
choices=choices,
type=str,
help="""Path to hf config.json of model to convert""",
)
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model and processor to the hub after converting""",
)
    args = parser.parse_args()

    convert_blip2_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
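# Added illustration (not part of the conversion script): the rename helpers simply
# re-key a state dict, e.g.
#
#     sd = {"ln_vision.weight": 1}
#     rename_key(sd, "ln_vision.weight", "vision_model.post_layernorm.weight")
#     assert sd == {"vision_model.post_layernorm.weight": 1}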
| 309 | 0 |
from pickle import UnpicklingError
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict
from ..utils import logging
logger = logging.get_logger(__name__)


def load_flax_checkpoint_in_pytorch_model(pt_model, model_file):
    try:
        with open(model_file, "rb") as flax_state_f:
            flax_state = from_bytes(None, flax_state_f.read())
    except UnpicklingError as e:
        try:
            with open(model_file) as f:
                if f.read().startswith("version"):
                    raise OSError(
                        "You seem to have cloned a repository without having git-lfs installed. Please"
                        " install git-lfs and run `git lfs install` followed by `git lfs pull` in the"
                        " folder you cloned."
                    )
                else:
                    raise ValueError from e
        except (UnicodeDecodeError, ValueError):
            raise EnvironmentError(f"Unable to convert {model_file} to Flax deserializable object. ")

    return load_flax_weights_in_pytorch_model(pt_model, flax_state)


def load_flax_weights_in_pytorch_model(pt_model, flax_state):
    """Load flax checkpoints in a PyTorch model"""

    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            "Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see"
            " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
            " instructions."
        )
        raise

    # check if we have bf16 weights
    is_type_bf16 = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype == jnp.bfloat16, flax_state)).values()
    if any(is_type_bf16):
        # convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16
        # and bf16 is not fully supported in PT yet.
        logger.warning(
            "Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` "
            "before loading those in PyTorch model."
        )
        flax_state = jax.tree_util.tree_map(
            lambda params: params.astype(np.float32) if params.dtype == jnp.bfloat16 else params, flax_state
        )

    pt_model.base_model_prefix = ""

    flax_state_dict = flatten_dict(flax_state, sep=".")
    pt_model_dict = pt_model.state_dict()

    # keep track of unexpected & missing keys
    unexpected_keys = []
    missing_keys = set(pt_model_dict.keys())

    for flax_key_tuple, flax_tensor in flax_state_dict.items():
        flax_key_tuple_array = flax_key_tuple.split(".")

        if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4:
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]
            flax_tensor = jnp.transpose(flax_tensor, (3, 2, 0, 1))
        elif flax_key_tuple_array[-1] == "kernel":
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]
            flax_tensor = flax_tensor.T
        elif flax_key_tuple_array[-1] == "scale":
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]

        if "time_embedding" not in flax_key_tuple_array:
            for i, flax_key_tuple_string in enumerate(flax_key_tuple_array):
                flax_key_tuple_array[i] = (
                    flax_key_tuple_string.replace("_0", ".0")
                    .replace("_1", ".1")
                    .replace("_2", ".2")
                    .replace("_3", ".3")
                    .replace("_4", ".4")
                    .replace("_5", ".5")
                    .replace("_6", ".6")
                    .replace("_7", ".7")
                    .replace("_8", ".8")
                    .replace("_9", ".9")
                )

        flax_key = ".".join(flax_key_tuple_array)

        if flax_key in pt_model_dict:
            if flax_tensor.shape != pt_model_dict[flax_key].shape:
                raise ValueError(
                    f"Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected "
                    f"to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )
            else:
                # add weight to pytorch dict
                flax_tensor = np.asarray(flax_tensor) if not isinstance(flax_tensor, np.ndarray) else flax_tensor
                pt_model_dict[flax_key] = torch.from_numpy(flax_tensor)
                # remove from missing keys
                missing_keys.remove(flax_key)
        else:
            # weight is not expected by PyTorch model
            unexpected_keys.append(flax_key)

    pt_model.load_state_dict(pt_model_dict)

    # re-transform missing_keys to list
    missing_keys = list(missing_keys)

    if len(unexpected_keys) > 0:
        logger.warning(
            "Some weights of the Flax model were not used when initializing the PyTorch model"
            f" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"
            f" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"
            " (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"
            f" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"
            " to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"
            " FlaxBertForSequenceClassification model)."
        )

    if len(missing_keys) > 0:
        logger.warning(
            f"Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"
            f" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"
            " use it for predictions and inference."
        )

    return pt_model
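# Added sketch of the kernel conventions handled above: Flax Dense kernels are
# (in_features, out_features) while torch.nn.Linear weights are (out, in), hence the
# transpose; Flax Conv kernels are (H, W, in, out) vs torch's (out, in, H, W), hence
# the (3, 2, 0, 1) permutation.
if __name__ == "__main__":
    dense_kernel = np.zeros((16, 32))      # Flax Dense: (in, out)
    print(dense_kernel.T.shape)            # torch Linear: (32, 16)
    conv_kernel = np.zeros((3, 3, 8, 16))  # Flax Conv: (H, W, in, out)
    print(np.transpose(conv_kernel, (3, 2, 0, 1)).shape)  # torch Conv2d: (16, 8, 3, 3)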
| 369 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
    from transformers import AutoProcessor, Blip2Processor, BlipImageProcessor, GPT2Tokenizer, PreTrainedTokenizerFast
@require_vision
class Blip2ProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model")

        processor = Blip2Processor(image_processor, tokenizer)

        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Creates a list of PIL images from random numpy arrays."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = Blip2Processor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = Blip2Processor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)

        encoded_tok = tokenizer(input_str, return_token_type_ids=False)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])
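# Added sketch (not in the original tests): the round trip these tests exercise —
# a processor bundles an image processor and a tokenizer, and restores both. `tok`
# below is a hypothetical, already-loaded tokenizer:
#
#     processor = Blip2Processor(image_processor=BlipImageProcessor(), tokenizer=tok)
#     processor.save_pretrained(path)
#     restored = AutoProcessor.from_pretrained(path)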
| 277 | 0 |
import os
import sys
SRC_DIR = os.path.join(os.path.dirname(__file__), "src")
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
dependencies = [
    "torch",
    "numpy",
    "tokenizers",
    "filelock",
    "requests",
    "tqdm",
    "regex",
    "sentencepiece",
    "sacremoses",
    "importlib_metadata",
    "huggingface_hub",
]
@add_start_docstrings(AutoConfig.__doc__)
def config(*args, **kwargs):
    return AutoConfig.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoTokenizer.__doc__)
def tokenizer(*args, **kwargs):
    return AutoTokenizer.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModel.__doc__)
def model(*args, **kwargs):
    return AutoModel.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForCausalLM.__doc__)
def modelForCausalLM(*args, **kwargs):
    return AutoModelForCausalLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForMaskedLM.__doc__)
def modelForMaskedLM(*args, **kwargs):
    return AutoModelForMaskedLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForSequenceClassification.__doc__)
def modelForSequenceClassification(*args, **kwargs):
    return AutoModelForSequenceClassification.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForQuestionAnswering.__doc__)
def modelForQuestionAnswering(*args, **kwargs):
    return AutoModelForQuestionAnswering.from_pretrained(*args, **kwargs)
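# Added usage sketch: this module is a torch.hub entry point, so the functions above
# are reached via torch.hub, e.g. (network access required):
#
#     import torch
#     tok = torch.hub.load("huggingface/transformers", "tokenizer", "bert-base-uncased")
#     model = torch.hub.load("huggingface/transformers", "model", "bert-base-uncased")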
| 12 |
"""simple docstring"""
from math import pi, sqrt, tan
def surface_area_cube ( side_length : float ):
    if side_length < 0:
        raise ValueError("""surface_area_cube() only accepts non-negative values""" )
    return 6 * side_length**2
def surface_area_cuboid ( length : float , breadth : float , height : float ):
    if length < 0 or breadth < 0 or height < 0:
        raise ValueError("""surface_area_cuboid() only accepts non-negative values""" )
    return 2 * ((length * breadth) + (breadth * height) + (length * height))
def surface_area_sphere ( radius : float ):
    if radius < 0:
        raise ValueError("""surface_area_sphere() only accepts non-negative values""" )
    return 4 * pi * radius**2
def surface_area_hemisphere ( radius : float ):
    if radius < 0:
        raise ValueError("""surface_area_hemisphere() only accepts non-negative values""" )
    return 3 * pi * radius**2
def surface_area_cone ( radius : float , height : float ):
    if radius < 0 or height < 0:
        raise ValueError("""surface_area_cone() only accepts non-negative values""" )
    return pi * radius * (radius + (height**2 + radius**2) ** 0.5)
def surface_area_conical_frustum ( radius_1 : float , radius_2 : float , height : float ):
    if radius_1 < 0 or radius_2 < 0 or height < 0:
        raise ValueError(
            """surface_area_conical_frustum() only accepts non-negative values""" )
    slant_height = (height**2 + (radius_1 - radius_2) ** 2) ** 0.5
    return pi * ((slant_height * (radius_1 + radius_2)) + radius_1**2 + radius_2**2)
def surface_area_cylinder ( radius : float , height : float ):
    if radius < 0 or height < 0:
        raise ValueError("""surface_area_cylinder() only accepts non-negative values""" )
    return 2 * pi * radius * (height + radius)
def surface_area_torus ( torus_radius : float , tube_radius : float ):
    if torus_radius < 0 or tube_radius < 0:
        raise ValueError("""surface_area_torus() only accepts non-negative values""" )
    if torus_radius < tube_radius:
        raise ValueError(
            """surface_area_torus() does not support spindle or self intersecting tori""" )
    return 4 * pow(pi , 2 ) * torus_radius * tube_radius
def area_rectangle ( length : float , width : float ):
    if length < 0 or width < 0:
        raise ValueError("""area_rectangle() only accepts non-negative values""" )
    return length * width
def area_square ( side_length : float ):
    if side_length < 0:
        raise ValueError("""area_square() only accepts non-negative values""" )
    return side_length**2
def area_triangle ( base : float , height : float ):
    if base < 0 or height < 0:
        raise ValueError("""area_triangle() only accepts non-negative values""" )
    return (base * height) / 2
def area_triangle_three_sides ( side_1 : float , side_2 : float , side_3 : float ):
    if side_1 < 0 or side_2 < 0 or side_3 < 0:
        raise ValueError("""area_triangle_three_sides() only accepts non-negative values""" )
    elif side_1 + side_2 < side_3 or side_1 + side_3 < side_2 or side_2 + side_3 < side_1:
        raise ValueError("""Given three sides do not form a triangle""" )
    semi_perimeter = (side_1 + side_2 + side_3) / 2
    area = sqrt(
        semi_perimeter
        * (semi_perimeter - side_1)
        * (semi_perimeter - side_2)
        * (semi_perimeter - side_3) )
    return area
def area_parallelogram ( base : float , height : float ):
    if base < 0 or height < 0:
        raise ValueError("""area_parallelogram() only accepts non-negative values""" )
    return base * height
def area_trapezium ( base_1 : float , base_2 : float , height : float ):
    if base_1 < 0 or base_2 < 0 or height < 0:
        raise ValueError("""area_trapezium() only accepts non-negative values""" )
    return 1 / 2 * (base_1 + base_2) * height
def area_circle ( radius : float ):
    if radius < 0:
        raise ValueError("""area_circle() only accepts non-negative values""" )
    return pi * radius**2
def area_ellipse ( radius_x : float , radius_y : float ):
    if radius_x < 0 or radius_y < 0:
        raise ValueError("""area_ellipse() only accepts non-negative values""" )
    return pi * radius_x * radius_y
def area_rhombus ( diagonal_1 : float , diagonal_2 : float ):
    if diagonal_1 < 0 or diagonal_2 < 0:
        raise ValueError("""area_rhombus() only accepts non-negative values""" )
    return 1 / 2 * diagonal_1 * diagonal_2
def area_reg_polygon ( sides : int , length : float ):
    if not isinstance(sides , int ) or sides < 3:
        raise ValueError(
            """area_reg_polygon() only accepts integers greater than or \
equal to three as number of sides""" )
    elif length < 0:
        raise ValueError(
            """area_reg_polygon() only accepts non-negative values as \
length of a side""" )
    return (sides * length**2) / (4 * tan(pi / sides ))
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print("[DEMO] Areas of various geometric shapes: \n")
print(f"""Rectangle: {area_rectangle(1_0, 2_0) = }""")
print(f"""Square: {area_square(1_0) = }""")
print(f"""Triangle: {area_triangle(1_0, 1_0) = }""")
print(f"""Triangle: {area_triangle_three_sides(5, 1_2, 1_3) = }""")
print(f"""Parallelogram: {area_parallelogram(1_0, 2_0) = }""")
print(f"""Rhombus: {area_rhombus(1_0, 2_0) = }""")
print(f"""Trapezium: {area_trapezium(1_0, 2_0, 3_0) = }""")
print(f"""Circle: {area_circle(2_0) = }""")
print(f"""Ellipse: {area_ellipse(1_0, 2_0) = }""")
print("\nSurface Areas of various geometric shapes: \n")
print(f"""Cube: {surface_area_cube(2_0) = }""")
print(f"""Cuboid: {surface_area_cuboid(1_0, 2_0, 3_0) = }""")
print(f"""Sphere: {surface_area_sphere(2_0) = }""")
print(f"""Hemisphere: {surface_area_hemisphere(2_0) = }""")
print(f"""Cone: {surface_area_cone(1_0, 2_0) = }""")
print(f"""Conical Frustum: {surface_area_conical_frustum(1_0, 2_0, 3_0) = }""")
print(f"""Cylinder: {surface_area_cylinder(1_0, 2_0) = }""")
print(f"""Torus: {surface_area_torus(2_0, 1_0) = }""")
print(f"""Equilateral Triangle: {area_reg_polygon(3, 1_0) = }""")
print(f"""Square: {area_reg_polygon(4, 1_0) = }""")
print(f"""Reqular Pentagon: {area_reg_polygon(5, 1_0) = }""")
| 109 | 0 |
def lowercase ( pence : int = 200 ) -> int:
    '''simple docstring'''
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # base case: 1 way to make 0 pence
    for coin in coins:
        for i in range(coin , pence + 1 , 1 ):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]
if __name__ == "__main__":
    assert lowercase(200) == 73682
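    # Added worked example (hedged): with coins {1, 2, 5} there are exactly 4 ways
    # to make 5 pence (5, 2+2+1, 2+1+1+1, 1+1+1+1+1), which the table reproduces.
    assert lowercase(5) == 4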
| 371 |
"""simple docstring"""
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
__lowercase = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class _A ( ChunkPipeline ):
"""simple docstring"""
def __init__( self : List[Any] , **__UpperCAmelCase : List[Any]):
super().__init__(**__UpperCAmelCase)
if self.framework == "tf":
raise ValueError(f'''The {self.__class__} is only available in PyTorch.''')
requires_backends(self , "vision")
        self.check_model_type(MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING)
    def __call__( self , image : Union[str, "Image.Image", List[Dict[str, Any]]] , candidate_labels : Union[str, List[str]] = None , **kwargs , ):
        if "text_queries" in kwargs:
            candidate_labels = kwargs.pop("text_queries")
        if isinstance(image , (str, Image.Image)):
            inputs = {"image": image, "candidate_labels": candidate_labels}
        else:
            inputs = image
        results = super().__call__(inputs , **kwargs)
        return results
    def _sanitize_parameters ( self , **kwargs):
        postprocess_params = {}
        if "threshold" in kwargs:
            postprocess_params["threshold"] = kwargs["threshold"]
        if "top_k" in kwargs:
            postprocess_params["top_k"] = kwargs["top_k"]
        return {}, {}, postprocess_params
    def preprocess ( self , inputs):
        image = load_image(inputs["image"])
        candidate_labels = inputs["candidate_labels"]
        if isinstance(candidate_labels , str):
            candidate_labels = candidate_labels.split(",")
        target_size = torch.tensor([[image.height, image.width]] , dtype=torch.int64)
        for i, candidate_label in enumerate(candidate_labels):
            text_inputs = self.tokenizer(candidate_label , return_tensors=self.framework)
            image_features = self.image_processor(image , return_tensors=self.framework)
            yield {
                "is_last": i == len(candidate_labels) - 1,
                "target_size": target_size,
                "candidate_label": candidate_label,
                **text_inputs,
                **image_features,
            }
    def _forward ( self , model_inputs):
        target_size = model_inputs.pop("target_size")
        candidate_label = model_inputs.pop("candidate_label")
        is_last = model_inputs.pop("is_last")
        outputs = self.model(**model_inputs)
        model_outputs = {"target_size": target_size, "candidate_label": candidate_label, "is_last": is_last, **outputs}
        return model_outputs
    def postprocess ( self , model_outputs , threshold=0.1 , top_k=None):
        results = []
        for model_output in model_outputs:
            label = model_output["candidate_label"]
            model_output = BaseModelOutput(model_output)
            outputs = self.image_processor.post_process_object_detection(
                outputs=model_output , threshold=threshold , target_sizes=model_output["target_size"])[0]
            for index in outputs["scores"].nonzero():
                score = outputs["scores"][index].item()
                box = self._get_bounding_box(outputs["boxes"][index][0])
                result = {"score": score, "label": label, "box": box}
                results.append(result)
        results = sorted(results , key=lambda x: x["score"] , reverse=True)
        if top_k:
            results = results[:top_k]
        return results
    def _get_bounding_box ( self , box : "torch.Tensor"):
        if self.framework != "pt":
            raise ValueError("The ZeroShotObjectDetectionPipeline is only available in PyTorch.")
        xmin , ymin , xmax , ymax = box.int().tolist()
        bbox = {
            "xmin": xmin,
            "ymin": ymin,
            "xmax": xmax,
            "ymax": ymax,
        }
        return bbox
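# Hedged usage sketch (an addition, not part of the upstream module): the class is
# normally reached through `transformers.pipeline`; the OWL-ViT checkpoint and the
# COCO image URL below are illustrative assumptions, and the call downloads both.
if __name__ == "__main__":
    from transformers import pipeline
    detector = pipeline("zero-shot-object-detection", model="google/owlvit-base-patch32")
    detections = detector(
        "http://images.cocodataset.org/val2017/000000039769.jpg",
        candidate_labels=["cat", "remote control"],
    )
    print(detections)  # e.g. [{"score": ..., "label": "cat", "box": {"xmin": ..., ...}}, ...]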
| 226 | 0 |
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
ViTImageProcessor,
ViTMAEConfig,
ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
lowerCamelCase : Optional[int] = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt")
@dataclass
class DataTrainingArguments :
    dataset_name : Optional[str] = field(
        default='cifar10' , metadata={'help': 'Name of a dataset from the datasets package'} )
    dataset_config_name : Optional[str] = field(
        default=None , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} )
    image_column_name : Optional[str] = field(
        default=None , metadata={'help': 'The column name of the images in the files.'} )
    train_dir : Optional[str] = field(default=None , metadata={'help': 'A folder containing the training data.'} )
    validation_dir : Optional[str] = field(default=None , metadata={'help': 'A folder containing the validation data.'} )
    train_val_split : Optional[float] = field(
        default=0.15 , metadata={'help': 'Percent to split off of train for validation.'} )
    max_train_samples : Optional[int] = field(
        default=None , metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of training examples to this '
                'value if set.'
            )
        } , )
    max_eval_samples : Optional[int] = field(
        default=None , metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
                'value if set.'
            )
        } , )
    def __post_init__ ( self ) -> None:
        '''simple docstring'''
        data_files = {}
        if self.train_dir is not None:
            data_files['train'] = self.train_dir
        if self.validation_dir is not None:
            data_files['val'] = self.validation_dir
        self.data_files = data_files if data_files else None
@dataclass
class ModelArguments :
    model_name_or_path : Optional[str] = field(
        default=None , metadata={
            'help': (
                'The model checkpoint for weights initialization. Don\'t set if you want to train a model from scratch.'
            )
        } , )
    config_name : Optional[str] = field(
        default=None , metadata={'help': 'Pretrained config name or path if not the same as model_name_or_path'} )
    config_overrides : Optional[str] = field(
        default=None , metadata={
            'help': (
                'Override some existing default config settings when a model is trained from scratch. Example: '
                'n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'
            )
        } , )
    cache_dir : Optional[str] = field(
        default=None , metadata={'help': 'Where do you want to store the pretrained models downloaded from s3'} )
    model_revision : str = field(
        default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
    image_processor_name : Optional[str] = field(default=None , metadata={'help': 'Name or path of preprocessor config.'} )
    use_auth_token : bool = field(
        default=False , metadata={
            'help': (
                'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
                'with private models).'
            )
        } , )
    mask_ratio : float = field(
        default=0.75 , metadata={'help': 'The ratio of the number of masked tokens in the input sequence.'} )
    norm_pix_loss : bool = field(
        default=True , metadata={'help': 'Whether or not to train with normalized pixel values as target.'} )
@dataclass
class CustomTrainingArguments ( TrainingArguments ):
    base_learning_rate : float = field(
        default=1e-3 , metadata={'help': 'Base learning rate: absolute_lr = base_lr * total_batch_size / 256.'} )
def collate_fn ( examples ):
    """simple docstring"""
    pixel_values = torch.stack([example['pixel_values'] for example in examples] )
    return {"pixel_values": pixel_values}
def main ( ) -> None:
    """simple docstring"""
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args , data_args , training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        model_args , data_args , training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry('run_mae' , model_args , data_args )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level )
    transformers.utils.logging.set_verbosity(log_level )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ f"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" )
logger.info(f"Training/evaluation parameters {training_args}" )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Initialize our dataset.
    ds = load_dataset(
        data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if 'validation' in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split , float ) and data_args.train_val_split > 0.0:
        split = ds['train'].train_test_split(data_args.train_val_split )
        ds['train'] = split['train']
        ds['validation'] = split['test']
# Load pretrained model and image processor
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config_kwargs = {
        'cache_dir': model_args.cache_dir,
        'revision': model_args.model_revision,
        'use_auth_token': True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        config = ViTMAEConfig.from_pretrained(model_args.config_name , **config_kwargs )
    elif model_args.model_name_or_path:
        config = ViTMAEConfig.from_pretrained(model_args.model_name_or_path , **config_kwargs )
    else:
        config = ViTMAEConfig()
        logger.warning('You are instantiating a new config instance from scratch.' )
        if model_args.config_overrides is not None:
            logger.info(f"Overriding config: {model_args.config_overrides}" )
            config.update_from_string(model_args.config_overrides )
            logger.info(f"New config: {config}" )
    # adapt config
    config.update(
        {
            'mask_ratio': model_args.mask_ratio,
            'norm_pix_loss': model_args.norm_pix_loss,
        } )
    # create image processor
    if model_args.image_processor_name:
        image_processor = ViTImageProcessor.from_pretrained(model_args.image_processor_name , **config_kwargs )
    elif model_args.model_name_or_path:
        image_processor = ViTImageProcessor.from_pretrained(model_args.model_name_or_path , **config_kwargs )
    else:
        image_processor = ViTImageProcessor()
    # create model
    if model_args.model_name_or_path:
        model = ViTMAEForPreTraining.from_pretrained(
            model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    else:
        logger.info('Training new model from scratch' )
        model = ViTMAEForPreTraining(config )
if training_args.do_train:
        column_names = ds['train'].column_names
    else:
        column_names = ds['validation'].column_names
    if data_args.image_column_name is not None:
        image_column_name = data_args.image_column_name
    elif "image" in column_names:
        image_column_name = 'image'
    elif "img" in column_names:
        image_column_name = 'img'
    else:
        image_column_name = column_names[0]
# transformations as done in original MAE paper
# source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
if "shortest_edge" in image_processor.size:
_SCREAMING_SNAKE_CASE =image_processor.size['shortest_edge']
else:
_SCREAMING_SNAKE_CASE =(image_processor.size['height'], image_processor.size['width'])
_SCREAMING_SNAKE_CASE =Compose(
[
Lambda(lambda _UpperCamelCase : img.convert('RGB' ) if img.mode != "RGB" else img ),
RandomResizedCrop(_UpperCamelCase , scale=(0.2, 1.0) , interpolation=InterpolationMode.BICUBIC ),
RandomHorizontalFlip(),
ToTensor(),
Normalize(mean=image_processor.image_mean , std=image_processor.image_std ),
] )
def preprocess_images(_UpperCamelCase : Dict ):
_SCREAMING_SNAKE_CASE =[transforms(_UpperCamelCase ) for image in examples[image_column_name]]
return examples
if training_args.do_train:
if "train" not in ds:
raise ValueError('--do_train requires a train dataset' )
if data_args.max_train_samples is not None:
            ds['train'] = ds['train'].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
# Set the training transforms
ds["train"].set_transform(_UpperCamelCase )
if training_args.do_eval:
if "validation" not in ds:
raise ValueError('--do_eval requires a validation dataset' )
if data_args.max_eval_samples is not None:
            ds['validation'] = (
                ds['validation'].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
            )
# Set the validation transforms
ds["validation"].set_transform(_UpperCamelCase )
# Compute absolute learning rate
    total_train_batch_size = (
        training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
    )
    if training_args.base_learning_rate is not None:
        training_args.learning_rate = training_args.base_learning_rate * total_train_batch_size / 2_56
    # Initialize our trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=ds['train'] if training_args.do_train else None , eval_dataset=ds['validation'] if training_args.do_eval else None , tokenizer=image_processor , data_collator=collate_fn , )
# Training
if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint )
        trainer.save_model()
        trainer.log_metrics('train' , train_result.metrics )
        trainer.save_metrics('train' , train_result.metrics )
        trainer.save_state()
    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics('eval' , metrics )
        trainer.save_metrics('eval' , metrics )
    # Write model card and (optionally) push to hub
    kwargs = {
        'tasks': 'masked-auto-encoding',
        'dataset': data_args.dataset_name,
        'tags': ['masked-auto-encoding'],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs )
    else:
        trainer.create_model_card(**kwargs )
def _mp_fn ( index ) -> None:
    """simple docstring"""
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
    main()
| 47 |
'''simple docstring'''
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class StableUnCLIPImageNormalizer ( ModelMixin , ConfigMixin ):
    @register_to_config
    def __init__( self , embedding_dim : int = 768 , ) -> None:
        '''simple docstring'''
        super().__init__()
        self.mean = nn.Parameter(torch.zeros(1 , embedding_dim ) )
        self.std = nn.Parameter(torch.ones(1 , embedding_dim ) )
    def to ( self , torch_device : Optional[Union[str, torch.device]] = None , torch_dtype : Optional[torch.dtype] = None , ):
        '''simple docstring'''
        self.mean = nn.Parameter(self.mean.to(torch_device ).to(torch_dtype ) )
        self.std = nn.Parameter(self.std.to(torch_device ).to(torch_dtype ) )
        return self
    def scale ( self , embeds ):
        '''simple docstring'''
        embeds = (embeds - self.mean) * 1.0 / self.std
        return embeds
    def unscale ( self , embeds ):
        '''simple docstring'''
        embeds = (embeds * self.std) + self.mean
        return embeds
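# Hedged round-trip sketch (an addition, not part of the upstream module):
# `unscale` should invert `scale` for any embedding, up to floating point error.
if __name__ == "__main__":
    normalizer = StableUnCLIPImageNormalizer(embedding_dim=4)
    sample = torch.randn(1, 4)
    assert torch.allclose(normalizer.unscale(normalizer.scale(sample)), sample, atol=1e-6)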
| 47 | 1 |
from ...configuration_utils import PretrainedConfig
NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''sijunhe/nezha-cn-base''': '''https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json''',
}
class NezhaConfig ( PretrainedConfig ):
    pretrained_config_archive_map = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
    model_type = """nezha"""
    def __init__( self , vocab_size=2_1128 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , max_relative_position=64 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , classifier_dropout=0.1 , pad_token_id=0 , bos_token_id=2 , eos_token_id=3 , use_cache=True , **kwargs , ) -> None:
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.max_relative_position = max_relative_position
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
| 78 |
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class StableUnCLIPPipelineFastTests ( PipelineLatentTesterMixin , PipelineKarrasSchedulerTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = StableUnCLIPPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
    test_xformers_attention = False
    def get_dummy_components ( self ):
        '''simple docstring'''
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size
        # prior components
        torch.manual_seed(0 )
        prior_tokenizer = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
        torch.manual_seed(0 )
        prior_text_encoder = CLIPTextModelWithProjection(
            CLIPTextConfig(
                bos_token_id=0 , eos_token_id=2 , hidden_size=embedder_hidden_size , projection_dim=embedder_projection_dim , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
        torch.manual_seed(0 )
        prior = PriorTransformer(
            num_attention_heads=2 , attention_head_dim=12 , embedding_dim=embedder_projection_dim , num_layers=1 , )
        torch.manual_seed(0 )
        prior_scheduler = DDPMScheduler(
            variance_type='''fixed_small_log''' , prediction_type='''sample''' , num_train_timesteps=1000 , clip_sample=True , clip_sample_range=5.0 , beta_schedule='''squaredcos_cap_v2''' , )
        # regular denoising components
        torch.manual_seed(0 )
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size )
        image_noising_scheduler = DDPMScheduler(beta_schedule='''squaredcos_cap_v2''' )
        torch.manual_seed(0 )
        tokenizer = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
        torch.manual_seed(0 )
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0 , eos_token_id=2 , hidden_size=embedder_hidden_size , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
        torch.manual_seed(0 )
        unet = UNetaDConditionModel(
            sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D''') , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type='''projection''' , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=embedder_hidden_size , layers_per_block=1 , upcast_attention=True , use_linear_projection=True , )
        torch.manual_seed(0 )
        scheduler = DDIMScheduler(
            beta_schedule='''scaled_linear''' , beta_start=0.0_0085 , beta_end=0.012 , prediction_type='''v_prediction''' , set_alpha_to_one=False , steps_offset=1 , )
        torch.manual_seed(0 )
        vae = AutoencoderKL()
        components = {
            # prior components
            '''prior_tokenizer''': prior_tokenizer,
            '''prior_text_encoder''': prior_text_encoder,
            '''prior''': prior,
            '''prior_scheduler''': prior_scheduler,
            # image noising components
            '''image_normalizer''': image_normalizer,
            '''image_noising_scheduler''': image_noising_scheduler,
            # regular denoising components
            '''tokenizer''': tokenizer,
            '''text_encoder''': text_encoder,
            '''unet''': unet,
            '''scheduler''': scheduler,
            '''vae''': vae,
        }
        return components
    def get_dummy_inputs ( self , device , seed=0 ):
        '''simple docstring'''
        if str(device ).startswith('''mps''' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            '''prompt''': '''A painting of a squirrel eating a burger''',
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''prior_num_inference_steps''': 2,
            '''output_type''': '''numpy''',
        }
        return inputs
    def test_attention_slicing_forward_pass ( self ):
        '''simple docstring'''
        test_max_difference = torch_device == '''cpu'''
        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference )
    def test_inference_batch_single_identical ( self ):
        '''simple docstring'''
        test_max_difference = torch_device in ['''cpu''', '''mps''']
        self._test_inference_batch_single_identical(test_max_difference=test_max_difference )
@slow
@require_torch_gpu
class StableUnCLIPPipelineIntegrationTests ( unittest.TestCase ):
    def tearDown ( self ):
        '''simple docstring'''
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_unclip ( self ):
        '''simple docstring'''
        expected_image = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy''' )
        pipe = StableUnCLIPPipeline.from_pretrained('''fusing/stable-unclip-2-1-l''' , torch_dtype=torch.float16 )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        generator = torch.Generator(device='''cpu''' ).manual_seed(0 )
        output = pipe('''anime turtle''' , generator=generator , output_type='''np''' )
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image , expected_image )
    def test_stable_unclip_pipeline_with_sequential_cpu_offloading ( self ):
        '''simple docstring'''
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        pipe = StableUnCLIPPipeline.from_pretrained('''fusing/stable-unclip-2-1-l''' , torch_dtype=torch.float16 )
        pipe = pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        _ = pipe(
            '''anime turtle''' , prior_num_inference_steps=2 , num_inference_steps=2 , output_type='''np''' , )
        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
| 78 | 1 |
from __future__ import annotations
class IIRFilter :
    def __init__( self , order : int ) -> None:
        '''simple docstring'''
        self.order = order
        # a_{0} ... a_{k}
        self.a_coeffs = [1.0] + [0.0] * order
        # b_{0} ... b_{k}
        self.b_coeffs = [1.0] + [0.0] * order
        # x[n-1] ... x[n-k]
        self.input_history = [0.0] * self.order
        # y[n-1] ... y[n-k]
        self.output_history = [0.0] * self.order
    def set_coefficients ( self , a_coeffs : list[float] , b_coeffs : list[float] ) -> None:
        '''simple docstring'''
        if len(a_coeffs ) < self.order:
            a_coeffs = [1.0, *a_coeffs]
        if len(a_coeffs ) != self.order + 1:
            error_message = (
                f'''Expected a_coeffs to have {self.order + 1} elements '''
                f'''for {self.order}-order filter, got {len(a_coeffs )}'''
            )
            raise ValueError(error_message )
        if len(b_coeffs ) != self.order + 1:
            error_message = (
                f'''Expected b_coeffs to have {self.order + 1} elements '''
                f'''for {self.order}-order filter, got {len(b_coeffs )}'''
            )
            raise ValueError(error_message )
        self.a_coeffs = a_coeffs
        self.b_coeffs = b_coeffs
    def process ( self , sample : float ) -> float:
        '''simple docstring'''
        result = 0.0
        # Start at index 1 and do index 0 at the end.
        for i in range(1 , self.order + 1 ):
            result += (
                self.b_coeffs[i] * self.input_history[i - 1]
                - self.a_coeffs[i] * self.output_history[i - 1]
            )
        result = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]
        self.input_history[1:] = self.input_history[:-1]
        self.output_history[1:] = self.output_history[:-1]
        self.input_history[0] = sample
        self.output_history[0] = result
        return result
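# Hedged usage sketch (an addition, not part of the original file): with
# a_coeffs = [1.0, -0.5] and b_coeffs = [0.5, 0.0] this realises
# y[n] = 0.5 * x[n] + 0.5 * y[n-1], a simple one-pole low-pass; a unit step at
# the input converges towards 1.0.
if __name__ == "__main__":
    low_pass = IIRFilter(1)
    low_pass.set_coefficients([1.0, -0.5], [0.5, 0.0])
    step_response = [low_pass.process(1.0) for _ in range(20)]
    assert abs(step_response[-1] - 1.0) < 1e-3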
| 348 |
"""simple docstring"""
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTMSNModelTester :
    def __init__( self , parent , batch_size=13 , image_size=30 , patch_size=2 , num_channels=3 , is_training=True , use_labels=True , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=10 , initializer_range=0.02 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        # in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs ( self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config ( self ):
        return ViTMSNConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
    def create_and_check_model ( self , config , pixel_values , labels ):
        model = ViTMSNModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_image_classification ( self , config , pixel_values , labels ):
        config.num_labels = self.type_sequence_label_size
        model = ViTMSNForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        print(f"Pixel and labels shape: {pixel_values.shape}, {labels.shape}" )
        print(f"Labels: {labels}" )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        config.num_channels = 1
        model = ViTMSNForImageClassification(config )
        model.to(torch_device )
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    def prepare_config_and_inputs_for_common ( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ViTMSNModelTest ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {'''feature-extraction''': ViTMSNModel, '''image-classification''': ViTMSNForImageClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp ( self ):
        self.model_tester = ViTMSNModelTester(self )
        self.config_tester = ConfigTester(self , config_class=ViTMSNConfig , has_text_modality=False , hidden_size=37 )
    def test_config ( self ):
        self.config_tester.run_common_tests()
@unittest.skip(reason="ViTMSN does not use inputs_embeds" )
    def test_inputs_embeds ( self ):
        pass
    def test_model_common_attributes ( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , nn.Linear ) )
    def test_forward_signature ( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_model ( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_image_classification ( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
@slow
    def test_model_from_pretrained ( self ):
        for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTMSNModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img ( ):
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return image
@require_torch
@require_vision
class ViTMSNModelIntegrationTest ( unittest.TestCase ):
@cached_property
    def default_image_processor ( self ):
        return ViTImageProcessor.from_pretrained("facebook/vit-msn-small" ) if is_vision_available() else None
@slow
    def test_inference_image_classification_head ( self ):
        torch.manual_seed(2 )
        model = ViTMSNForImageClassification.from_pretrained("facebook/vit-msn-small" ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="pt" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 10_00) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([-0.0_803, -0.4_454, -0.2_375] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 ) )
| 292 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    """configuration_megatron_bert""": ["""MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MegatronBertConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_megatron_bert"] = [
"""MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MegatronBertForCausalLM""",
"""MegatronBertForMaskedLM""",
"""MegatronBertForMultipleChoice""",
"""MegatronBertForNextSentencePrediction""",
"""MegatronBertForPreTraining""",
"""MegatronBertForQuestionAnswering""",
"""MegatronBertForSequenceClassification""",
"""MegatronBertForTokenClassification""",
"""MegatronBertModel""",
"""MegatronBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_megatron_bert import (
MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
MegatronBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
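# Hedged illustration (an addition, not part of the package): the `_LazyModule`
# indirection above defers importing heavy submodules until an attribute is first
# touched. A minimal standalone sketch of the same idea, with hypothetical names:
#
#     import importlib, types
#
#     class LazyModule(types.ModuleType):
#         def __init__(self, name, import_structure):
#             super().__init__(name)
#             self._import_structure = import_structure
#         def __getattr__(self, attr):
#             for submodule, names in self._import_structure.items():
#                 if attr in names:
#                     module = importlib.import_module(f"{self.__name__}.{submodule}")
#                     return getattr(module, attr)
#             raise AttributeError(attr)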
| 223 |
def A__ ( word , max_width ) -> list:
    words = word.split()
    def justify(line , width , max_width ) -> str:
        overall_spaces_count = max_width - width
        words_count = len(line )
        if len(line ) == 1:
            # if there is only word in line
            # just insert overall_spaces_count for the remainder of line
            return line[0] + " " * overall_spaces_count
        else:
            spaces_to_insert_between_words = words_count - 1
            # num_spaces_between_words_list[i] : tells you to insert
            # num_spaces_between_words_list[i] spaces
            # after word on line[i]
            num_spaces_between_words_list = spaces_to_insert_between_words * [
                overall_spaces_count // spaces_to_insert_between_words
            ]
            spaces_count_mod = (
                overall_spaces_count % spaces_to_insert_between_words
            )
            # distribute spaces via round robin to the left words
            for i in range(spaces_count_mod ):
                num_spaces_between_words_list[i] += 1
            aligned_words_list = []
            for i in range(spaces_to_insert_between_words ):
                # add the word
                aligned_words_list.append(line[i] )
                # add the spaces to insert
                aligned_words_list.append(num_spaces_between_words_list[i] * """ """ )
            # just add the last word to the sentence
            aligned_words_list.append(line[-1] )
            # join the aligned words list to form a justified line
            return "".join(aligned_words_list )
    answer = []
    line: list[str] = []
    width = 0
    for word in words:
        if width + len(word ) + len(line ) <= max_width:
            # keep adding words until we can fill out max_width
            # width = sum of length of all words (without overall_spaces_count)
            # len(word) = length of current word
            # len(line) = number of overall_spaces_count to insert between words
            line.append(word )
            width += len(word )
        else:
            # justify the line and add it to result
            answer.append(justify(line , width , max_width ) )
            # reset new line and new width
            line , width = [word], len(word )
    remaining_spaces = max_width - width - len(line )
    answer.append(" ".join(line ) + (remaining_spaces + 1) * " " )
    return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
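    # Added usage example (hedged): the classic 16-column justification case; the
    # last line is left-justified and padded, the others are fully justified.
    assert A__("This is an example of text justification.", 16) == [
        "This    is    an",
        "example  of text",
        "justification.  ",
    ]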
| 223 | 1 |
'''simple docstring'''
from transformers import BertTokenizerFast
from .custom_tokenization import CustomTokenizer
class _a ( BertTokenizerFast ):
    slow_tokenizer_class = CustomTokenizer
pass
| 34 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
    "configuration_longt5": ["LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP", "LongT5Config", "LongT5OnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_longt5"] = [
"LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST",
"LongT5EncoderModel",
"LongT5ForConditionalGeneration",
"LongT5Model",
"LongT5PreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_longt5"] = [
"FlaxLongT5ForConditionalGeneration",
"FlaxLongT5Model",
"FlaxLongT5PreTrainedModel",
]
if TYPE_CHECKING:
    from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_longt5 import (
            LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongT5EncoderModel,
            LongT5ForConditionalGeneration,
            LongT5Model,
            LongT5PreTrainedModel,
        )
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_longt5 import (
            FlaxLongT5ForConditionalGeneration,
            FlaxLongT5Model,
            FlaxLongT5PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 277 | 0 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class TFLEDModelTester :
    '''simple docstring'''
    config_cls = LEDConfig
    config_updates = {}
    hidden_act = '''gelu'''
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_labels=False , vocab_size=99 , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=20 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , attention_window=4 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.attention_window = attention_window
        # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
        # [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
        # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
        # because its local attention only attends to `self.attention_window` and one before and one after
        self.key_length = self.attention_window + 2
        # because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
        # the `test_attention_outputs` and `test_hidden_states_output` tests
        self.encoder_seq_length = (
            self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
        )
    def prepare_config_and_inputs_for_common ( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
        input_ids = tf.concat([input_ids, eos_tensor] , axis=1 )
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        config = self.config_cls(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , attention_window=self.attention_window , **self.config_updates , )
        inputs_dict = prepare_led_inputs_dict(config , input_ids , decoder_input_ids )
        global_attention_mask = tf.concat(
            [tf.zeros_like(input_ids )[:, :-1], tf.ones_like(input_ids )[:, -1:]] , axis=-1 , )
        inputs_dict["global_attention_mask"] = global_attention_mask
        return config, inputs_dict
    def check_decoder_model_past_large_inputs ( self , config , inputs_dict ):
        model = TFLEDModel(config=config ).get_decoder()
        input_ids = inputs_dict["""input_ids"""]
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["""attention_mask"""][:1, :]
        self.batch_size = 1
        # first forward pass
        outputs = model(input_ids , attention_mask=attention_mask , use_cache=True )
        output , past_key_values = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3) , config.vocab_size )
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.int8 )
        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens] , axis=-1 )
        next_attention_mask = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
        output_from_no_past = model(next_input_ids , attention_mask=next_attention_mask )[0]
        output_from_past = model(next_tokens , attention_mask=next_attention_mask , past_key_values=past_key_values )[0]
        self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
        # select random slice
        random_slice_idx = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice , output_from_no_past_slice , rtol=1e-3 )
def prepare_led_inputs_dict(config , input_ids , decoder_input_ids , attention_mask=None , decoder_attention_mask=None , head_mask=None , decoder_head_mask=None , ):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids , config.pad_token_id ) , tf.int8 )
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.int8 ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.int8 ),
            ] , axis=-1 , )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    return {
        "input_ids": input_ids,
        "attention_mask": attention_mask,
        "decoder_input_ids": decoder_input_ids,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
    }
@require_tf
class TFLEDModelTest ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    all_model_classes = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
    all_generative_model_classes = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
{
'''conversational''': TFLEDForConditionalGeneration,
'''feature-extraction''': TFLEDModel,
'''summarization''': TFLEDForConditionalGeneration,
'''text2text-generation''': TFLEDForConditionalGeneration,
'''translation''': TFLEDForConditionalGeneration,
}
if is_tf_available()
else {}
)
    is_encoder_decoder = True
    test_pruning = False
    test_headmasking = False
    test_onnx = False
    def setUp ( self ):
        self.model_tester = TFLEDModelTester(self )
        self.config_tester = ConfigTester(self , config_class=LEDConfig )
    def test_config ( self ):
        self.config_tester.run_common_tests()
    def test_decoder_model_past_large_inputs ( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs )
    def test_attention_outputs ( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        inputs_dict["global_attention_mask"] = tf.zeros_like(inputs_dict["""attention_mask"""] )
        num_global_attn_indices = 2
        inputs_dict["global_attention_mask"] = tf.where(
            tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict["""global_attention_mask"""] , )
        config.return_dict = True
        seq_length = self.model_tester.seq_length
        encoder_seq_length = self.model_tester.encoder_seq_length
        def check_decoder_attentions_output(outputs ):
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
        def check_encoder_attentions_output(outputs ):
            attentions = [t.numpy() for t in outputs.encoder_attentions]
            global_attentions = [t.numpy() for t in outputs.encoder_global_attentions]
            self.assertEqual(len(attentions ) , self.model_tester.num_hidden_layers )
            self.assertEqual(len(global_attentions ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
            self.assertListEqual(
                list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , )
        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["use_cache"] = False
            config.output_hidden_states = False
            model = model_class(config )
            outputs = model(self._prepare_for_class(inputs_dict , model_class ) )
            out_len = len(outputs )
            self.assertEqual(config.output_hidden_states , False )
            check_encoder_attentions_output(outputs )
            if self.is_encoder_decoder:
                model = model_class(config )
                outputs = model(self._prepare_for_class(inputs_dict , model_class ) )
                self.assertEqual(config.output_hidden_states , False )
                check_decoder_attentions_output(outputs )
            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config )
            outputs = model(self._prepare_for_class(inputs_dict , model_class ) )
            self.assertEqual(config.output_hidden_states , False )
            check_encoder_attentions_output(outputs )
            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config )
            outputs = model(self._prepare_for_class(inputs_dict , model_class ) )
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(outputs ) )
            self.assertEqual(model.config.output_hidden_states , True )
            check_encoder_attentions_output(outputs )
    @unittest.skip("LED keeps using potentially symbolic tensors in conditionals and breaks tracing.")
    def test_saved_model_creation(self):
        pass

    def test_generate_with_headmasking(self):
        # TODO: Head-masking not yet implemented
        pass
def _long_tensor(tok_lst):
    return tf.constant(tok_lst, dtype=tf.int32)


TOLERANCE = 1e-4
@slow
@require_tf
class TFLEDModelIntegrationTest(unittest.TestCase):
    def test_inference_no_head(self):
        model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384").led
        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1024, 768)
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[2.3050, 2.8279, 0.6531], [-1.8457, -0.1455, -3.5661], [-1.0186, 0.4586, -2.2043]],
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3)

    def test_inference_with_head(self):
        model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384")
        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1024, model.config.vocab_size)
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[33.6507, 6.4572, 16.8089], [5.8739, -2.4238, 11.2902], [-3.2139, -4.3149, 4.2783]],
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3, rtol=1e-3)
| 362 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    'configuration_m2m_100': ['M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP', 'M2M100Config', 'M2M100OnnxConfig'],
    'tokenization_m2m_100': ['M2M100Tokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_m2m_100'] = [
        'M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST',
        'M2M100ForConditionalGeneration',
        'M2M100Model',
        'M2M100PreTrainedModel',
    ]
if TYPE_CHECKING:
    from .configuration_m2m_100 import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, M2M100Config, M2M100OnnxConfig
    from .tokenization_m2m_100 import M2M100Tokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_m2m_100 import (
            M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
            M2M100ForConditionalGeneration,
            M2M100Model,
            M2M100PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 341 | 0 |
'''simple docstring'''
def excel_title_to_column(column_title: str) -> int:
    """Convert an Excel-style column title (e.g. "A", "AB") to its column number."""
    assert column_title.isupper()
    answer = 0
    index = len(column_title) - 1
    power = 0
    while index >= 0:
        value = (ord(column_title[index]) - 64) * pow(26, power)
        answer += value
        power += 1
        index -= 1
    return answer
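# Illustrative checks (values chosen here, not from the original source):
# excel_title_to_column("A") == 1, excel_title_to_column("Z") == 26, and
# excel_title_to_column("AB") == 28, since each letter contributes
# (ord(letter) - 64) * 26**position, counting positions from the right.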
if __name__ == "__main__":
from doctest import testmod
testmod()
| 344 |
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def get_job_links(workflow_run_id, token=None):
    """Extract job names and their links from a GitHub Actions workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_links = {}
    try:
        job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)
        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})
        return job_links
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")
    return {}
def get_artifacts_links(workflow_run_id, token=None):
    """Get all artifact links from a workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/artifacts?per_page=100"
    result = requests.get(url, headers=headers).json()
    artifacts = {}
    try:
        artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)
        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})
        return artifacts
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")
    return {}
def download_artifact(artifact_name, artifact_url, output_dir, token):
    """Download a GitHub Actions artifact zip file to `output_dir`."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}
    # The artifact URL answers with a redirect; grab the real download location first.
    result = requests.get(artifact_url, headers=headers, allow_redirects=False)
    download_url = result.headers["Location"]
    response = requests.get(download_url, allow_redirects=True)
    file_path = os.path.join(output_dir, f"{artifact_name}.zip")
    with open(file_path, "wb") as fp:
        fp.write(response.content)
def get_errors_from_single_artifact(artifact_zip_path, job_links=None):
    """Extract errors from a downloaded artifact (in .zip format)."""
    errors = []
    failed_tests = []
    job_name = None
    with zipfile.ZipFile(artifact_zip_path) as z:
        for filename in z.namelist():
            if not os.path.isdir(filename):
                # read the file
                if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
                    with z.open(filename) as f:
                        for line in f:
                            line = line.decode("UTF-8").strip()
                            if filename == "failures_line.txt":
                                try:
                                    # `error_line` is the place where `error` occurs
                                    error_line = line[: line.index(": ")]
                                    error = line[line.index(": ") + len(": ") :]
                                    errors.append([error_line, error])
                                except Exception:
                                    # skip un-related lines
                                    pass
                            elif filename == "summary_short.txt" and line.startswith("FAILED "):
                                # `test` is the test method that failed
                                test = line[len("FAILED ") :]
                                failed_tests.append(test)
                            elif filename == "job_name.txt":
                                job_name = line
    if len(errors) != len(failed_tests):
        raise ValueError(
            f"`errors` and `failed_tests` should have the same number of elements. Got {len(errors)} for `errors` "
            f"and {len(failed_tests)} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"
            " problem."
        )
    job_link = None
    if job_name and job_links:
        job_link = job_links.get(job_name, None)
    # A list with elements of the form (line of error, error, failed test)
    result = [x + [y] + [job_link] for x, y in zip(errors, failed_tests)]
    return result
def get_all_errors(artifact_dir, job_links=None):
    """Extract errors from all artifact files in `artifact_dir`."""
    errors = []
    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if p.endswith(".zip")]
    for p in paths:
        errors.extend(get_errors_from_single_artifact(p, job_links=job_links))
    return errors
def reduce_by_error(logs, error_filter=None):
    """Count each error and collect the tests that hit it."""
    counter = Counter()
    counter.update([x[1] for x in logs])
    counts = counter.most_common()
    r = {}
    for error, count in counts:
        if error_filter is None or error not in error_filter:
            r[error] = {"count": count, "failed_tests": [(x[2], x[0]) for x in logs if x[1] == error]}
    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r
def get_model(test):
    """Get the model name from a test method path."""
    test = test.split("::")[0]
    if test.startswith("tests/models/"):
        test = test.split("/")[2]
    else:
        test = None
    return test
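# Illustrative mapping (hypothetical test ids): a test such as
# "tests/models/bert/test_modeling_bert.py::BertModelTest::test_forward"
# reduces to "bert", while paths outside "tests/models/" reduce to None.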
def reduce_by_model(logs, error_filter=None):
    """Count each error per model."""
    logs = [(x[0], x[1], get_model(x[2])) for x in logs]
    logs = [x for x in logs if x[2] is not None]
    tests = {x[2] for x in logs}
    r = {}
    for test in tests:
        counter = Counter()
        # count by errors in `test`
        counter.update([x[1] for x in logs if x[2] == test])
        counts = counter.most_common()
        error_counts = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
        n_errors = sum(error_counts.values())
        if n_errors > 0:
            r[test] = {"count": n_errors, "errors": error_counts}
    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r
def make_github_table(reduced_by_error):
    header = "| no. | error | status |"
    sep = "|-:|:-|:-|"
    lines = [header, sep]
    for error in reduced_by_error:
        count = reduced_by_error[error]["count"]
        line = f"| {count} | {error[:100]} | |"
        lines.append(line)
    return "\n".join(lines)
def make_github_table_per_model(reduced_by_model):
    header = "| model | no. of errors | major error | count |"
    sep = "|-:|-:|-:|-:|"
    lines = [header, sep]
    for model in reduced_by_model:
        count = reduced_by_model[model]["count"]
        error, _count = list(reduced_by_model[model]["errors"].items())[0]
        line = f"| {model} | {count} | {error[:60]} | {_count} |"
        lines.append(line)
    return "\n".join(lines)
if __name__ == "__main__":
__A =argparse.ArgumentParser()
# Required parameters
parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
parser.add_argument(
"--output_dir",
type=str,
required=True,
help="Where to store the downloaded artifacts and other result files.",
)
parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
__A =parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
__A =get_job_links(args.workflow_run_id, token=args.token)
__A ={}
# To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
# For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
if _job_links:
for k, v in _job_links.items():
# This is how GitHub actions combine job names.
if " / " in k:
__A =k.find(" / ")
__A =k[index + len(" / ") :]
__A =v
with open(os.path.join(args.output_dir, "job_links.json"), "w", encoding="UTF-8") as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
__A =get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
__A =get_all_errors(args.output_dir, job_links=job_links)
# `e[1]` is the error
__A =Counter()
counter.update([e[1] for e in errors])
# print the top 30 most common test errors
__A =counter.most_common(3_0)
for item in most_common:
print(item)
with open(os.path.join(args.output_dir, "errors.json"), "w", encoding="UTF-8") as fp:
json.dump(errors, fp, ensure_ascii=False, indent=4)
__A =reduce_by_error(errors)
__A =reduce_by_model(errors)
__A =make_github_table(reduced_by_error)
__A =make_github_table_per_model(reduced_by_model)
with open(os.path.join(args.output_dir, "reduced_by_error.txt"), "w", encoding="UTF-8") as fp:
fp.write(sa)
with open(os.path.join(args.output_dir, "reduced_by_model.txt"), "w", encoding="UTF-8") as fp:
fp.write(sa)
| 226 | 0 |
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)

# TODO: upload to AWS
RETRIBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "yjernite/retribert-base-uncased": (
        "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json"
    ),
}


class RetriBertConfig(PretrainedConfig):
    model_type = "retribert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=8,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        share_encoders=True,
        projection_dim=128,
        pad_token_id=0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.share_encoders = share_encoders
        self.projection_dim = projection_dim
| 369 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    'configuration_lilt': ['LILT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LiltConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_lilt'] = [
        'LILT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'LiltForQuestionAnswering',
        'LiltForSequenceClassification',
        'LiltForTokenClassification',
        'LiltModel',
        'LiltPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lilt import (
LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
LiltPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
"""simple docstring"""
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class RagRetrieverTest(TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        self.retrieval_vector_size = 8

        # DPR tok
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        dpr_tokenizer_path = os.path.join(self.tmpdirname, "dpr_tokenizer")
        os.makedirs(dpr_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(dpr_tokenizer_path, DPR_VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        # BART tok
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        bart_tokenizer_path = os.path.join(self.tmpdirname, "bart_tokenizer")
        os.makedirs(bart_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_dpr_tokenizer(self) -> DPRQuestionEncoderTokenizer:
        return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))

    def get_dpr_ctx_encoder_tokenizer(self) -> DPRContextEncoderTokenizer:
        return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))

    def get_bart_tokenizer(self) -> BartTokenizer:
        return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname, "bart_tokenizer"))

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
    def get_dummy_dataset(self):
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "text": ["foo", "bar"],
                "title": ["Foo", "Bar"],
                "embeddings": [np.ones(self.retrieval_vector_size), 2 * np.ones(self.retrieval_vector_size)],
            }
        )
        dataset.add_faiss_index("embeddings", string_factory="Flat", metric_type=faiss.METRIC_INNER_PRODUCT)
        return dataset

    def get_dummy_canonical_hf_index_retriever(self):
        dataset = self.get_dummy_dataset()
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
        )
        with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset:
            mock_load_dataset.return_value = dataset
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
            )
        return retriever
    def get_dummy_custom_hf_index_retriever(self, from_disk: bool):
        dataset = self.get_dummy_dataset()
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
            index_name="custom",
        )
        if from_disk:
            config.passages_path = os.path.join(self.tmpdirname, "dataset")
            config.index_path = os.path.join(self.tmpdirname, "index.faiss")
            dataset.get_index("embeddings").save(os.path.join(self.tmpdirname, "index.faiss"))
            dataset.drop_index("embeddings")
            dataset.save_to_disk(os.path.join(self.tmpdirname, "dataset"))
            del dataset
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
            )
        else:
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
                index=CustomHFIndex(config.retrieval_vector_size, dataset),
            )
        return retriever
    def get_dummy_legacy_index_retriever(self):
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "text": ["foo", "bar"],
                "title": ["Foo", "Bar"],
                "embeddings": [np.ones(self.retrieval_vector_size + 1), 2 * np.ones(self.retrieval_vector_size + 1)],
            }
        )
        dataset.add_faiss_index("embeddings", string_factory="Flat", metric_type=faiss.METRIC_INNER_PRODUCT)

        index_file_name = os.path.join(self.tmpdirname, "hf_bert_base.hnswSQ8_correct_phi_128.c_index")
        dataset.save_faiss_index("embeddings", index_file_name + ".index.dpr")
        pickle.dump(dataset["id"], open(index_file_name + ".index_meta.dpr", "wb"))

        passages_file_name = os.path.join(self.tmpdirname, "psgs_w100.tsv.pkl")
        passages = {sample["id"]: [sample["text"], sample["title"]] for sample in dataset}
        pickle.dump(passages, open(passages_file_name, "wb"))

        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
            index_name="legacy",
            index_path=self.tmpdirname,
        )
        retriever = RagRetriever(
            config, question_encoder_tokenizer=self.get_dpr_tokenizer(), generator_tokenizer=self.get_bart_tokenizer()
        )
        return retriever
    def test_canonical_hf_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_canonical_hf_index_retriever()
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
        self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])
    def test_canonical_hf_index_retriever_save_and_from_pretrained(self):
        retriever = self.get_dummy_canonical_hf_index_retriever()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset:
                mock_load_dataset.return_value = self.get_dummy_dataset()
                retriever.save_pretrained(tmp_dirname)
                retriever = RagRetriever.from_pretrained(tmp_dirname)
                self.assertIsInstance(retriever, RagRetriever)
                hidden_states = np.array(
                    [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
                )
                out = retriever.retrieve(hidden_states, n_docs=1)
                self.assertTrue(out is not None)
    def test_custom_hf_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
        self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])
    def test_custom_hf_index_retriever_save_and_from_pretrained(self):
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
            self.assertIsInstance(retriever, RagRetriever)
            hidden_states = np.array(
                [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
            )
            out = retriever.retrieve(hidden_states, n_docs=1)
            self.assertTrue(out is not None)
    def test_custom_hf_index_retriever_retrieve_from_disk(self):
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=True)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
        self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])
    def test_custom_hf_index_retriever_save_and_from_pretrained_from_disk(self):
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=True)
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
            self.assertIsInstance(retriever, RagRetriever)
            hidden_states = np.array(
                [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
            )
            out = retriever.retrieve(hidden_states, n_docs=1)
            self.assertTrue(out is not None)
    def test_legacy_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_legacy_index_retriever()
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["text", "title"])
        self.assertEqual(len(doc_dicts[0]["text"]), n_docs)
        self.assertEqual(doc_dicts[0]["text"][0], "bar")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["text"][0], "foo")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])
    def test_legacy_hf_index_retriever_save_and_from_pretrained(self):
        retriever = self.get_dummy_legacy_index_retriever()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
            self.assertIsInstance(retriever, RagRetriever)
            hidden_states = np.array(
                [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
            )
            out = retriever.retrieve(hidden_states, n_docs=1)
            self.assertTrue(out is not None)
    @require_torch
    @require_tokenizers
    @require_sentencepiece
    def test_hf_index_retriever_call(self):
        import torch

        n_docs = 1
        retriever = self.get_dummy_canonical_hf_index_retriever()
        question_input_ids = [[5, 7], [10, 11]]
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever(question_input_ids, hidden_states, prefix=retriever.config.generator.prefix, n_docs=n_docs)
        context_input_ids, context_attention_mask, retrieved_doc_embeds = (
            out["context_input_ids"],
            out["context_attention_mask"],
            out["retrieved_doc_embeds"],
        )
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertIsInstance(context_input_ids, list)
        self.assertIsInstance(context_attention_mask, list)
        self.assertIsInstance(retrieved_doc_embeds, np.ndarray)

        out = retriever(
            question_input_ids,
            hidden_states,
            prefix=retriever.config.generator.prefix,
            n_docs=n_docs,
            return_tensors="pt",
        )
        context_input_ids, context_attention_mask, retrieved_doc_embeds, doc_ids = (  # noqa: F841
            out["context_input_ids"],
            out["context_attention_mask"],
            out["retrieved_doc_embeds"],
            out["doc_ids"],
        )
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertIsInstance(context_input_ids, torch.Tensor)
        self.assertIsInstance(context_attention_mask, torch.Tensor)
        self.assertIsInstance(retrieved_doc_embeds, torch.Tensor)
    @require_torch
    @require_tokenizers
    @require_sentencepiece
    def test_custom_hf_index_end2end_retriever_call(self):
        context_encoder_tokenizer = self.get_dpr_ctx_encoder_tokenizer()
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        retriever.set_ctx_encoder_tokenizer(context_encoder_tokenizer)

        question_input_ids = [[5, 7], [10, 11]]
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever(question_input_ids, hidden_states, prefix=retriever.config.generator.prefix, n_docs=n_docs)

        self.assertEqual(
            len(out), 6
        )  # check whether the retriever output consist of 6 attributes including tokenized docs
        self.assertEqual(
            all(k in out for k in ("tokenized_doc_ids", "tokenized_doc_attention_mask")), True
        )  # check for doc token related keys in dictionary.
| 78 |
"""simple docstring"""
class MaxFenwickTree:
    def __init__(self, size: int) -> None:
        self.size = size
        self.arr = [0] * size
        self.tree = [0] * size

    @staticmethod
    def get_next(index: int) -> int:
        return index | (index + 1)

    @staticmethod
    def get_prev(index: int) -> int:
        return (index & (index + 1)) - 1

    def update(self, index: int, value: int) -> None:
        self.arr[index] = value
        while index < self.size:
            current_left_border = self.get_prev(index) + 1
            if current_left_border == index:
                self.tree[index] = value
            else:
                # tree[index] covers arr[current_left_border .. index], so refresh
                # it with the maximum over that range
                self.tree[index] = max(value, self.query(current_left_border, index))
            index = self.get_next(index)

    def query(self, left: int, right: int) -> int:
        right -= 1  # because the right bound is exclusive
        result = 0
        while left <= right:
            current_left = self.get_prev(right)
            if left <= current_left:
                result = max(result, self.tree[right])
                right = current_left
            else:
                result = max(result, self.arr[right])
                right -= 1
        return result
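# Minimal usage sketch (illustrative values; positions are 0-indexed and
# `query` treats `right` as an exclusive bound):
#     tree = MaxFenwickTree(5)
#     tree.update(2, 7)
#     tree.query(0, 5)  # -> 7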
if __name__ == "__main__":
import doctest
doctest.testmod()
| 78 | 1 |
import argparse

import torch
from omegaconf import OmegaConf

from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel


def convert_ldm_original(checkpoint_path, config_path, output_path):
    config = OmegaConf.load(config_path)
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    keys = list(state_dict.keys())

    # extract state_dict for VQVAE
    first_stage_dict = {}
    first_stage_key = "first_stage_model."
    for key in keys:
        if key.startswith(first_stage_key):
            first_stage_dict[key.replace(first_stage_key, "")] = state_dict[key]

    # extract state_dict for UNetLDM
    unet_state_dict = {}
    unet_key = "model.diffusion_model."
    for key in keys:
        if key.startswith(unet_key):
            unet_state_dict[key.replace(unet_key, "")] = state_dict[key]

    vqvae_init_args = config.model.params.first_stage_config.params
    unet_init_args = config.model.params.unet_config.params

    vqvae = VQModel(**vqvae_init_args).eval()
    vqvae.load_state_dict(first_stage_dict)

    unet = UNetLDMModel(**unet_init_args).eval()
    unet.load_state_dict(unet_state_dict)

    noise_scheduler = DDIMScheduler(
        num_train_timesteps=config.model.params.timesteps,
        beta_schedule="scaled_linear",
        beta_start=config.model.params.linear_start,
        beta_end=config.model.params.linear_end,
        clip_sample=False,
    )

    pipeline = LDMPipeline(vqvae, unet, noise_scheduler)
    pipeline.save_pretrained(output_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--checkpoint_path", type=str, required=True)
    parser.add_argument("--config_path", type=str, required=True)
    parser.add_argument("--output_path", type=str, required=True)
    args = parser.parse_args()
    convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
| 284 |
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
logger = logging.get_logger(__name__)


@dataclass
class TensorFlowBenchmarkArguments(BenchmarkArguments):
    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]

    def __init__(self, **kwargs):
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                kwargs[positive_arg] = not kwargs.pop(deprecated_arg)
                logger.warning(
                    f"{deprecated_arg} is deprecated. Please use --no-{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}"
                )
        self.tpu_name = kwargs.pop("tpu_name", self.tpu_name)
        self.device_idx = kwargs.pop("device_idx", self.device_idx)
        self.eager_mode = kwargs.pop("eager_mode", self.eager_mode)
        self.use_xla = kwargs.pop("use_xla", self.use_xla)
        super().__init__(**kwargs)

    tpu_name: str = field(
        default=None,
        metadata={"help": "Name of TPU"},
    )
    device_idx: int = field(
        default=0,
        metadata={"help": "CPU / GPU device index. Defaults to 0."},
    )
    eager_mode: bool = field(default=False, metadata={"help": "Benchmark models in eager model."})
    use_xla: bool = field(
        default=False,
        metadata={
            "help": "Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`."
        },
    )

    @cached_property
    def _setup_tpu(self):
        requires_backends(self, ["tf"])
        tpu = None
        if self.tpu:
            try:
                if self.tpu_name:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name)
                else:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
            except ValueError:
                tpu = None
        return tpu

    @cached_property
    def _setup_strategy(self):
        requires_backends(self, ["tf"])
        if self.is_tpu:
            tf.config.experimental_connect_to_cluster(self._setup_tpu)
            tf.tpu.experimental.initialize_tpu_system(self._setup_tpu)
            strategy = tf.distribute.TPUStrategy(self._setup_tpu)
        else:
            # currently no multi gpu is allowed
            if self.is_gpu:
                # TODO: Currently only single GPU is supported
                tf.config.set_visible_devices(self.gpu_list[self.device_idx], "GPU")
                strategy = tf.distribute.OneDeviceStrategy(device=f"/gpu:{self.device_idx}")
            else:
                tf.config.set_visible_devices([], "GPU")  # disable GPU
                strategy = tf.distribute.OneDeviceStrategy(device=f"/cpu:{self.device_idx}")
        return strategy

    @property
    def is_tpu(self) -> bool:
        requires_backends(self, ["tf"])
        return self._setup_tpu is not None

    @property
    def strategy(self):
        requires_backends(self, ["tf"])
        return self._setup_strategy

    @property
    def gpu_list(self):
        requires_backends(self, ["tf"])
        return tf.config.list_physical_devices("GPU")

    @property
    def n_gpu(self) -> int:
        requires_backends(self, ["tf"])
        if self.cuda:
            return len(self.gpu_list)
        return 0

    @property
    def is_gpu(self) -> bool:
        return self.n_gpu > 0
| 284 | 1 |
'''simple docstring'''
from PIL import Image
def change_brightness(img: Image, level: float) -> Image:
    def brightness(c: int) -> float:
        return 128 + level + (c - 128)

    if not -255.0 <= level <= 255.0:
        raise ValueError("level must be between -255.0 (black) and 255.0 (white)")
    return img.point(brightness)
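# Illustrative mapping (values chosen here): with level=100, an input channel
# value of 50 becomes 128 + 100 + (50 - 128) = 150; every channel is shifted
# uniformly by `level` around the midpoint 128.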
if __name__ == "__main__":
# Load image
with Image.open('''image_data/lena.jpg''') as img:
# Change brightness to 100
lowerCAmelCase : int =change_brightness(img, 100)
brigt_img.save('''image_data/lena_brightness.png''', format='''png''')
| 223 |
'''simple docstring'''
def solution(n: int = 100) -> int:
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares)
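# Worked example: for n = 10 the sum of the squares is 385 and the square of
# the sum is 3025, so the difference is 3025 - 385 = 2640.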
if __name__ == "__main__":
print(F'''{solution() = }''')
| 223 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    'configuration_mobilenet_v2': [
        'MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'MobileNetV2Config',
        'MobileNetV2OnnxConfig',
    ],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['feature_extraction_mobilenet_v2'] = ['MobileNetV2FeatureExtractor']
    _import_structure['image_processing_mobilenet_v2'] = ['MobileNetV2ImageProcessor']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_mobilenet_v2'] = [
        'MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST',
        'MobileNetV2ForImageClassification',
        'MobileNetV2ForSemanticSegmentation',
        'MobileNetV2Model',
        'MobileNetV2PreTrainedModel',
        'load_tf_weights_in_mobilenet_v2',
    ]

if TYPE_CHECKING:
    from .configuration_mobilenet_v2 import (
        MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        MobileNetV2Config,
        MobileNetV2OnnxConfig,
    )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_mobilenet_v2 import MobileNetV2FeatureExtractor
        from .image_processing_mobilenet_v2 import MobileNetV2ImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mobilenet_v2 import (
            MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST,
            MobileNetV2ForImageClassification,
            MobileNetV2ForSemanticSegmentation,
            MobileNetV2Model,
            MobileNetV2PreTrainedModel,
            load_tf_weights_in_mobilenet_v2,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 38 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImg2ImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class ShapEImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ShapEImg2ImgPipeline
    params = ["image"]
    batch_params = ["image"]
    required_optional_params = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    test_gradient_checkpointing = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def renderer_dim(self):
        return 8

    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size,
            image_size=64,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_channels=3,
            num_hidden_layers=5,
            patch_size=1,
        )
        model = CLIPVisionModel(config)
        return model

    @property
    def dummy_image_processor(self):
        image_processor = CLIPImageProcessor(
            crop_size=224,
            do_center_crop=True,
            do_normalize=True,
            do_resize=True,
            image_mean=[0.48145466, 0.4578275, 0.40821073],
            image_std=[0.26862954, 0.26130258, 0.27577711],
            resample=3,
            size=224,
        )
        return image_processor

    @property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 16,
            "embedding_dim": self.time_input_dim,
            "num_embeddings": 32,
            "embedding_proj_dim": self.text_embedder_hidden_size,
            "time_embed_dim": self.time_embed_dim,
            "num_layers": 1,
            "clip_embed_dim": self.time_input_dim * 2,
            "additional_embeddings": 0,
            "time_embed_act_fn": "gelu",
            "norm_in_type": "layer",
            "embedding_proj_norm_type": "layer",
            "encoder_hid_proj_type": None,
            "added_emb_type": None,
        }
        model = PriorTransformer(**model_kwargs)
        return model

    @property
    def dummy_renderer(self):
        torch.manual_seed(0)
        model_kwargs = {
            "param_shapes": (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            "d_latent": self.time_input_dim,
            "d_hidden": self.renderer_dim,
            "n_output": 12,
            "background": (
                0.1,
                0.1,
                0.1,
            ),
        }
        model = ShapERenderer(**model_kwargs)
        return model
    def get_dummy_components(self):
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        image_processor = self.dummy_image_processor
        renderer = self.dummy_renderer
        scheduler = HeunDiscreteScheduler(
            beta_schedule="exp",
            num_train_timesteps=1024,
            prediction_type="sample",
            use_karras_sigmas=True,
            clip_sample=True,
            clip_sample_range=1.0,
        )
        components = {
            "prior": prior,
            "image_encoder": image_encoder,
            "image_processor": image_processor,
            "renderer": renderer,
            "scheduler": scheduler,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        input_image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": input_image,
            "generator": generator,
            "num_inference_steps": 1,
            "frame_size": 32,
            "output_type": "np",
        }
        return inputs
    def test_shap_e(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (20, 32, 32, 3)
        expected_slice = np.array(
            [
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
            ]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_consistent(self):
        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        self._test_inference_batch_single_identical(
            batch_size=2,
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
        )

    def test_num_images_per_prompt(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        batch_size = 1
        num_images_per_prompt = 2
        inputs = self.get_dummy_inputs(torch_device)
        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]
        images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]
        assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class ShapEImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_shap_e_img2img(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/shap_e/corgi.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/shap_e/test_shap_e_img2img_out.npy"
        )
        pipe = ShapEImg2ImgPipeline.from_pretrained("openai/shap-e-img2img")
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.Generator(device=torch_device).manual_seed(0)
        images = pipe(
            input_image,
            generator=generator,
            guidance_scale=3.0,
            num_inference_steps=64,
            frame_size=64,
            output_type="np",
        ).images[0]
        assert images.shape == (20, 64, 64, 3)
        assert_mean_pixel_difference(images, expected_image)
| 38 | 1 |
'''simple docstring'''
def present_value(discount_rate: float, cash_flows: list) -> float:
    if discount_rate < 0:
        raise ValueError('Discount rate cannot be negative')
    if not cash_flows:
        raise ValueError('Cash flows list cannot be empty')
    present_value = sum(
        cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(cash_flows)
    )
    return round(present_value, ndigits=2)
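# Worked example with hypothetical figures: discount_rate=0.1 and
# cash_flows=[-100, 50, 50, 50] gives -100 + 50/1.1 + 50/1.21 + 50/1.331,
# which rounds to 24.34.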
if __name__ == "__main__":
import doctest
doctest.testmod()
| 42 |
'''simple docstring'''
import os
from typing import Dict, List, Tuple, TypeVar, Union
T = TypeVar('T')

ListLike = Union[List[T], Tuple[T, ...]]
NestedDataStructureLike = Union[T, List[T], Dict[str, T]]
PathLike = Union[str, bytes, os.PathLike]
| 341 | 0 |
from math import sqrt
def solution(limit: int = 1000000) -> int:
    """Find the least value of M such that the number of integer-path cuboids
    up to M x M x M first exceeds `limit`."""
    num_cuboids: int = 0
    max_cuboid_size: int = 0
    sum_shortest_sides: int
    while num_cuboids <= limit:
        max_cuboid_size += 1
        for sum_shortest_sides in range(2, 2 * max_cuboid_size + 1):
            if sqrt(sum_shortest_sides**2 + max_cuboid_size**2).is_integer():
                num_cuboids += (
                    min(max_cuboid_size, sum_shortest_sides // 2)
                    - max(1, sum_shortest_sides - max_cuboid_size)
                    + 1
                )
    return max_cuboid_size
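# Geometric note: the shortest surface path over an a x b x M cuboid (M being
# the longest side grown above) unfolds to sqrt((a + b)**2 + M**2), so it is
# enough to test each a + b (`sum_shortest_sides`) against `max_cuboid_size`
# and count how many (a, b) splits of that sum form valid cuboids.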
if __name__ == "__main__":
print(f"{solution() = }")
| 125 |
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
logger = get_logger(__name__)


class MockDownloadManager:
    dummy_file_name = "dummy_data"
    datasets_scripts_dir = "datasets"
    is_streaming = False

    def __init__(
        self,
        dataset_name: str,
        config: str,
        version: Union[Version, str],
        cache_dir: Optional[str] = None,
        use_local_dummy_data: bool = False,
        load_existing_dummy_data: bool = True,
        download_callbacks: Optional[List[Callable]] = None,
    ):
        self.downloaded_size = 0
        self.dataset_name = dataset_name
        self.cache_dir = cache_dir
        self.use_local_dummy_data = use_local_dummy_data
        self.config = config
        # download_callbacks take a single url as input
        self.download_callbacks: List[Callable] = download_callbacks or []
        # if False, it doesn't load existing files and it returns the paths of the dummy files relative
        # to the dummy_data zip file root
        self.load_existing_dummy_data = load_existing_dummy_data
        # TODO(PVP, QL) might need to make this more general
        self.version_name = str(version)
        # to be downloaded
        self._dummy_file = None
        self._bucket_url = None
    @property
    def dummy_file(self):
        if self._dummy_file is None:
            self._dummy_file = self.download_dummy_data()
        return self._dummy_file

    @property
    def dummy_data_folder(self):
        if self.config is not None:
            # structure is dummy / config_name / version_name
            return os.path.join("dummy", self.config.name, self.version_name)
        # structure is dummy / version_name
        return os.path.join("dummy", self.version_name)

    @property
    def dummy_zip_file(self):
        return os.path.join(self.dummy_data_folder, "dummy_data.zip")

    def download_dummy_data(self):
        path_to_dummy_data_dir = (
            self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
        )
        local_path = cached_path(
            path_to_dummy_data_dir, cache_dir=self.cache_dir, extract_compressed_file=True, force_extract=True
        )
        return os.path.join(local_path, self.dummy_file_name)

    @property
    def local_path_to_dummy_data(self):
        return os.path.join(self.datasets_scripts_dir, self.dataset_name, self.dummy_zip_file)

    @property
    def github_path_to_dummy_data(self):
        if self._bucket_url is None:
            self._bucket_url = hf_github_url(self.dataset_name, self.dummy_zip_file.replace(os.sep, "/"))
        return self._bucket_url

    @property
    def manual_dir(self):
        # return full path if it is a dir
        if os.path.isdir(self.dummy_file):
            return self.dummy_file
        # else cut off path to file -> example `xsum`.
        return "/".join(self.dummy_file.replace(os.sep, "/").split("/")[:-1])
    def download_and_extract(self, data_url, *args):
        if self.load_existing_dummy_data:
            # dummy data is downloaded and tested
            dummy_file = self.dummy_file
        else:
            # dummy data cannot be downloaded and only the path to dummy file is returned
            dummy_file = self.dummy_file_name
        # special case when data_url is a dict
        if isinstance(data_url, dict):
            return self.create_dummy_data_dict(dummy_file, data_url)
        elif isinstance(data_url, (list, tuple)):
            return self.create_dummy_data_list(dummy_file, data_url)
        else:
            return self.create_dummy_data_single(dummy_file, data_url)

    def download(self, data_url, *args):
        return self.download_and_extract(data_url)

    def download_custom(self, data_url, custom_download):
        return self.download_and_extract(data_url)

    def extract(self, path, *args, **kwargs):
        return path

    def get_recorded_sizes_checksums(self):
        return {}
    def create_dummy_data_dict(self, path_to_dummy_data, data_url):
        dummy_data_dict = {}
        for key, single_urls in data_url.items():
            for download_callback in self.download_callbacks:
                if isinstance(single_urls, list):
                    for single_url in single_urls:
                        download_callback(single_url)
                else:
                    single_url = single_urls
                    download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            if isinstance(single_urls, list):
                value = [os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(x).name)) for x in single_urls]
            else:
                single_url = single_urls
                value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(single_url).name))
            dummy_data_dict[key] = value

        # make sure that values are unique
        if all(isinstance(i, str) for i in dummy_data_dict.values()) and len(set(dummy_data_dict.values())) < len(
            dummy_data_dict.values()
        ):
            # append key to value to make its name unique
            dummy_data_dict = {key: value + key for key, value in dummy_data_dict.items()}
        return dummy_data_dict
    def create_dummy_data_list(self, path_to_dummy_data, data_url):
        dummy_data_list = []
        # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
        is_tf_records = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}", url)) for url in data_url)
        is_pubmed_records = all(
            url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed") for url in data_url
        )
        if data_url and (is_tf_records or is_pubmed_records):
            data_url = [data_url[0]] * len(data_url)
        for single_url in data_url:
            for download_callback in self.download_callbacks:
                download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(single_url.split("/")[-1]))
            dummy_data_list.append(value)
        return dummy_data_list
    def create_dummy_data_single(self, path_to_dummy_data, data_url):
        for download_callback in self.download_callbacks:
            download_callback(data_url)
        # we force the name of each key to be the last file / folder name of the url path
        # if the url has arguments, we need to encode them with urllib.parse.quote_plus
        value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(data_url.split("/")[-1]))
        if os.path.exists(value) or not self.load_existing_dummy_data:
            return value
        else:
            # Backward compatibility, maybe deprecate at one point.
            # For many datasets with single url calls to dl_manager.download_and_extract,
            # the dummy_data.zip file is actually the zipped downloaded file
            # while now we expected the dummy_data.zip file to be a directory containing
            # the downloaded file.
            return path_to_dummy_data
    def delete_extracted_files(self):
        pass

    def manage_extracted_files(self):
        pass
    def iter_archive(self, path):
        def _iter_archive_members(path):
            # this preserves the order of the members inside the ZIP archive
            dummy_parent_path = Path(self.dummy_file).parent
            relative_path = path.relative_to(dummy_parent_path)
            with ZipFile(self.local_path_to_dummy_data) as zip_file:
                members = zip_file.namelist()
            for member in members:
                if member.startswith(relative_path.as_posix()):
                    yield dummy_parent_path.joinpath(member)

        path = Path(path)
        file_paths = _iter_archive_members(path) if self.use_local_dummy_data else path.rglob("*")
        for file_path in file_paths:
            if file_path.is_file() and not file_path.name.startswith((".", "__")):
                yield file_path.relative_to(path).as_posix(), file_path.open("rb")
    def iter_files(self, paths):
        if not isinstance(paths, list):
            paths = [paths]
        for path in paths:
            if os.path.isfile(path):
                if os.path.basename(path).startswith((".", "__")):
                    return
                yield path
            else:
                for dirpath, dirnames, filenames in os.walk(path):
                    if os.path.basename(dirpath).startswith((".", "__")):
                        continue
                    dirnames.sort()
                    for filename in sorted(filenames):
                        if filename.startswith((".", "__")):
                            continue
                        yield os.path.join(dirpath, filename)
| 125 | 1 |
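The mock download manager above filters out dot- and dunder-prefixed entries while walking dummy-data trees. The snippet below is a minimal standalone sketch of that traversal pattern; the function name and type hints are illustrative, not taken from the original class.

import os
from typing import Iterator, List, Union

def iter_visible_files(paths: Union[str, List[str]]) -> Iterator[str]:
    """Yield file paths under `paths`, skipping names starting with '.' or '__'."""
    if not isinstance(paths, list):
        paths = [paths]
    for path in paths:
        if os.path.isfile(path):
            if not os.path.basename(path).startswith((".", "__")):
                yield path
        else:
            for dirpath, dirnames, filenames in os.walk(path):
                if os.path.basename(dirpath).startswith((".", "__")):
                    continue
                dirnames.sort()  # deterministic traversal order
                for filename in sorted(filenames):
                    if not filename.startswith((".", "__")):
                        yield os.path.join(dirpath, filename)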
from ..utils import DummyObject, requires_backends
class UpperCAmelCase ( metaclass=DummyObject ):
    '''simple docstring'''

    _backends = ['transformers', 'torch', 'note_seq']

    def __init__(self, *args, **kwargs):
        """simple docstring"""
        requires_backends(self, ['transformers', 'torch', 'note_seq'])

    @classmethod
    def from_config(cls, *args, **kwargs):
        """simple docstring"""
        requires_backends(cls, ['transformers', 'torch', 'note_seq'])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        """simple docstring"""
        requires_backends(cls, ['transformers', 'torch', 'note_seq'])
| 140 |
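The placeholder class above defers an ImportError until the object is actually used. Below is a minimal sketch of how such a backend guard can be wired up with a metaclass; the `DummyObject` metaclass, the `requires_backends` helper, and the `MidiProcessor` example are written from scratch for illustration and are not the actual library implementations.

import importlib.util

def requires_backends(obj, backends):
    # Raise if any of the named packages is not importable.
    name = obj.__name__ if hasattr(obj, "__name__") else type(obj).__name__
    missing = [b for b in backends if importlib.util.find_spec(b) is None]
    if missing:
        raise ImportError(f"{name} requires the missing backends: {missing}")

class DummyObject(type):
    # Any access to an undefined attribute on the class triggers the backend check.
    def __getattr__(cls, key):
        requires_backends(cls, cls._backends)
        raise AttributeError(key)

class MidiProcessor(metaclass=DummyObject):  # hypothetical placeholder class
    _backends = ["transformers", "torch", "note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, self._backends)

# MidiProcessor.from_pretrained("x")  # raises ImportError when a backend is absent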
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, Transformer2DModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class __UpperCamelCase ( DiffusionPipeline ):
"""simple docstring"""
    def __init__(self, transformer: Transformer2DModel, vae: AutoencoderKL, scheduler: KarrasDiffusionSchedulers, id2label: Optional[Dict[int, str]] = None):
        """simple docstring"""
        super().__init__()
        self.register_modules(transformer=transformer, vae=vae, scheduler=scheduler)

        # create an imagenet label -> id dictionary for easier use
        self.labels = {}
        if id2label is not None:
            for key, value in id2label.items():
                for label in value.split(''','''):
                    self.labels[label.lstrip().lower()] = int(key)
            self.labels = dict(sorted(self.labels.items()))

    def get_label_ids(self, label: Union[str, List[str]]):
        """simple docstring"""
        if not isinstance(label, list):
            label = list(label)
        for l in label:
            if l not in self.labels:
                raise ValueError(
                    F'''{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}.''')
        return [self.labels[l] for l in label]
@torch.no_grad()
    def __call__(self, class_labels: List[int], guidance_scale: float = 4.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, num_inference_steps: int = 50, output_type: Optional[str] = "pil", return_dict: bool = True):
        """simple docstring"""
        batch_size = len(class_labels)
        latent_size = self.transformer.config.sample_size
        latent_channels = self.transformer.config.in_channels

        latents = randn_tensor(
            shape=(batch_size, latent_channels, latent_size, latent_size), generator=generator, device=self.device, dtype=self.transformer.dtype)
        latent_model_input = torch.cat([latents] * 2) if guidance_scale > 1 else latents

        class_labels = torch.tensor(class_labels, device=self.device).reshape(-1)
        class_null = torch.tensor([1000] * batch_size, device=self.device)
        class_labels_input = torch.cat([class_labels, class_null], 0) if guidance_scale > 1 else class_labels

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            if guidance_scale > 1:
                half = latent_model_input[: len(latent_model_input) // 2]
                latent_model_input = torch.cat([half, half], dim=0)
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            timesteps = t
            if not torch.is_tensor(timesteps):
                # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
                # This would be a good case for the `match` statement (Python 3.10+)
                is_mps = latent_model_input.device.type == '''mps'''
                if isinstance(timesteps, float):
                    dtype = torch.float32 if is_mps else torch.float64
                else:
                    dtype = torch.int32 if is_mps else torch.int64
                timesteps = torch.tensor([timesteps], dtype=dtype, device=latent_model_input.device)
            elif len(timesteps.shape) == 0:
                timesteps = timesteps[None].to(latent_model_input.device)
            # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
            timesteps = timesteps.expand(latent_model_input.shape[0])
            # predict noise model_output
            noise_pred = self.transformer(
                latent_model_input, timestep=timesteps, class_labels=class_labels_input).sample

            # perform classifier-free guidance
            if guidance_scale > 1:
                eps, rest = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
                cond_eps, uncond_eps = torch.split(eps, len(eps) // 2, dim=0)

                half_eps = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
                eps = torch.cat([half_eps, half_eps], dim=0)

                noise_pred = torch.cat([eps, rest], dim=1)

            # learned sigma
            if self.transformer.config.out_channels // 2 == latent_channels:
                model_output, _ = torch.split(noise_pred, latent_channels, dim=1)
            else:
                model_output = noise_pred

            # compute previous image: x_t -> x_t-1
            latent_model_input = self.scheduler.step(model_output, t, latent_model_input).prev_sample

        if guidance_scale > 1:
            latents, _ = latent_model_input.chunk(2, dim=0)
        else:
            latents = latent_model_input

        latents = 1 / self.vae.config.scaling_factor * latents
        samples = self.vae.decode(latents).sample

        samples = (samples / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        samples = samples.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            samples = self.numpy_to_pil(samples)

        if not return_dict:
            return (samples,)

        return ImagePipelineOutput(images=samples)
| 303 | 0 |
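The pipeline above runs the transformer on a doubled batch and recombines the two halves of the noise prediction. The following is a minimal sketch of that classifier-free guidance arithmetic in isolation; the helper name and tensor shapes are illustrative assumptions.

import torch

def apply_cfg(noise_pred: torch.Tensor, guidance_scale: float) -> torch.Tensor:
    """noise_pred stacks [conditional; unconditional] halves along the batch dim."""
    cond_eps, uncond_eps = torch.split(noise_pred, len(noise_pred) // 2, dim=0)
    guided = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
    # both halves receive the same guided prediction, matching the duplicated input batch
    return torch.cat([guided, guided], dim=0)

pred = torch.randn(4, 4, 32, 32)  # 2 conditional + 2 unconditional predictions
out = apply_cfg(pred, guidance_scale=4.0)
assert out.shape == pred.shape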
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
class _snake_case(BaseImageProcessor):

    model_input_names = ['pixel_values']

    def __init__(
        self,
        do_resize=True,
        size=None,
        resample=PIL.Image.BICUBIC,
        do_center_crop=True,
        crop_size=None,
        rescale_factor=1 / 255,
        do_rescale=True,
        do_normalize=True,
        image_mean=None,
        image_std=None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {'height': 256, 'width': 256}
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else {'height': 224, 'width': 224}
        crop_size = get_size_dict(crop_size, param_name='crop_size')
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(self, image, size, resample=PIL.Image.BICUBIC, data_format=None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if 'height' not in size or 'width' not in size:
            raise ValueError(f"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}")
        return resize(
            image, size=(size['height'], size['width']), resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image, size, data_format=None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if 'height' not in size or 'width' not in size:
            raise ValueError(f"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}")
        return center_crop(image, size=(size['height'], size['width']), data_format=data_format, **kwargs)

    def rescale(self, image, scale, data_format=None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image, mean, std, data_format=None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images,
        do_resize=None,
        size=None,
        resample=None,
        do_center_crop=None,
        crop_size=None,
        do_rescale=None,
        rescale_factor=None,
        do_normalize=None,
        image_mean=None,
        image_std=None,
        return_tensors=None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name='crop_size')
        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.')
        if do_resize and (size is None or resample is None):
            raise ValueError('Size and resample must be specified if do_resize is True.')
        if do_center_crop and crop_size is None:
            raise ValueError('Crop size must be specified if do_center_crop is True.')
        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.')
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('Image mean and std must be specified if do_normalize is True.')

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {'pixel_values': images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 355 |
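The processor above chains resize, center crop, rescale, and normalize before packing a `pixel_values` batch. Below is a small numpy sketch of the rescale-and-normalize stages, assuming the standard ImageNet mean/std of 0.5; the function name and shapes are illustrative.

import numpy as np

IMAGENET_MEAN = np.array([0.5, 0.5, 0.5])
IMAGENET_STD = np.array([0.5, 0.5, 0.5])

def rescale_and_normalize(image: np.ndarray) -> np.ndarray:
    """image: uint8 HWC array in [0, 255] -> float CHW array, normalized."""
    pixels = image.astype(np.float32) * (1 / 255)     # rescale to [0, 1]
    pixels = (pixels - IMAGENET_MEAN) / IMAGENET_STD  # per-channel normalize
    return pixels.transpose(2, 0, 1)                  # HWC -> CHW (channels first)

image = np.random.randint(0, 256, size=(224, 224, 3), dtype=np.uint8)
pixel_values = rescale_and_normalize(image)
assert pixel_values.shape == (3, 224, 224)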
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
NUM_HIDDEN_LAYERS_MAPPING = {
    '169M': 12,
    '430M': 24,
    '1B5': 24,
    '3B': 32,
    '7B': 32,
    '14B': 40,
}

HIDDEN_SIZE_MAPPING = {
    '169M': 768,
    '430M': 1024,
    '1B5': 2048,
    '3B': 2560,
    '7B': 4096,
    '14B': 5120,
}
def convert_state_dict(state_dict):
    state_dict_keys = list(state_dict.keys())
    for name in state_dict_keys:
        weight = state_dict.pop(name)
        # emb -> embedding
        if name.startswith('emb.'):
            name = name.replace('emb.', 'embeddings.')
        # ln_0 -> pre_ln (only present at block 0)
        if name.startswith('blocks.0.ln0'):
            name = name.replace('blocks.0.ln0', 'blocks.0.pre_ln')
        # att -> attention
        name = re.sub(r'blocks\.(\d+)\.att', r'blocks.\1.attention', name)
        # ffn -> feed_forward
        name = re.sub(r'blocks\.(\d+)\.ffn', r'blocks.\1.feed_forward', name)
        # time_mix_k -> time_mix_key
        if name.endswith('.time_mix_k'):
            name = name.replace('.time_mix_k', '.time_mix_key')
        # time_mix_v -> time_mix_value
        if name.endswith('.time_mix_v'):
            name = name.replace('.time_mix_v', '.time_mix_value')
        # time_mix_r -> time_mix_receptance
        if name.endswith('.time_mix_r'):
            name = name.replace('.time_mix_r', '.time_mix_receptance')
        if name != 'head.weight':
            name = 'rwkv.' + name
        state_dict[name] = weight
    return state_dict
def convert_rmkv_checkpoint_to_hf_format(
    repo_id, checkpoint_file, output_dir, size=None, tokenizer_file=None, push_to_hub=False, model_name=None
):
    # 1. If possible, build the tokenizer.
    if tokenizer_file is None:
        print('No `--tokenizer_file` provided, we will use the default tokenizer.')
        vocab_size = 50277
        tokenizer = AutoTokenizer.from_pretrained('EleutherAI/gpt-neox-20b')
    else:
        tokenizer = PreTrainedTokenizerFast(tokenizer_file=tokenizer_file)
        vocab_size = len(tokenizer)
    tokenizer.save_pretrained(output_dir)

    # 2. Build the config
    possible_sizes = list(NUM_HIDDEN_LAYERS_MAPPING.keys())
    if size is None:
        # Try to infer size from the checkpoint name
        for candidate in possible_sizes:
            if candidate in checkpoint_file:
                size = candidate
                break
        if size is None:
            raise ValueError('Could not infer the size, please provide it with the `--size` argument.')
    if size not in possible_sizes:
        raise ValueError(f'`size` should be one of {possible_sizes}, got {size}.')

    config = RwkvConfig(
        vocab_size=vocab_size,
        num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size],
        hidden_size=HIDDEN_SIZE_MAPPING[size],
    )
    config.save_pretrained(output_dir)

    # 3. Download the model file then convert the state_dict
    model_file = hf_hub_download(repo_id, checkpoint_file)
    state_dict = torch.load(model_file, map_location='cpu')
    state_dict = convert_state_dict(state_dict)

    # 4. Split in shards and save
    shards, index = shard_checkpoint(state_dict)
    for shard_file, shard in shards.items():
        torch.save(shard, os.path.join(output_dir, shard_file))

    if index is not None:
        save_index_file = os.path.join(output_dir, WEIGHTS_INDEX_NAME)
        # Save the index as well
        with open(save_index_file, 'w', encoding='utf-8') as f:
            content = json.dumps(index, indent=2, sort_keys=True) + '\n'
            f.write(content)

    # 5. Clean up shards (for some reason the files PyTorch saves take as much space as the whole state_dict)
    print(
        'Cleaning up shards. This may error with an OOM error; if this is the case, don\'t worry, you still have converted the model.')
    shard_files = list(shards.keys())

    del state_dict
    del shards
    gc.collect()

    for shard_file in shard_files:
        state_dict = torch.load(os.path.join(output_dir, shard_file))
        torch.save({k: v.cpu().clone() for k, v in state_dict.items()}, os.path.join(output_dir, shard_file))

    del state_dict
    gc.collect()

    if push_to_hub:
        if model_name is None:
            raise ValueError('Please provide a `model_name` to push the model to the Hub.')
        model = AutoModelForCausalLM.from_pretrained(output_dir)
        model.push_to_hub(model_name, max_shard_size='2GB')
        tokenizer.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--repo_id', default=None, type=str, required=True, help='Repo ID from which to pull the checkpoint.'
)
parser.add_argument(
'--checkpoint_file', default=None, type=str, required=True, help='Name of the checkpoint file in the repo.'
)
parser.add_argument(
'--output_dir', default=None, type=str, required=True, help='Where to save the converted model.'
)
parser.add_argument(
'--tokenizer_file',
default=None,
type=str,
help='Path to the tokenizer file to use (if not provided, only the model is converted).',
)
parser.add_argument(
'--size',
default=None,
type=str,
help='Size of the model. Will be inferred from the `checkpoint_file` if not passed.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Push to the Hub the converted model.',
)
parser.add_argument(
'--model_name',
default=None,
type=str,
help='Name of the pushed model on the Hub, including the username / organization.',
)
    args = parser.parse_args()
convert_rmkv_checkpoint_to_hf_format(
args.repo_id,
args.checkpoint_file,
args.output_dir,
size=args.size,
tokenizer_file=args.tokenizer_file,
push_to_hub=args.push_to_hub,
model_name=args.model_name,
)
| 327 | 0 |
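The heart of the conversion script is regex-driven renaming of checkpoint keys before resharding. A small self-contained example of that renaming pattern follows; the mapping rules mirror `convert_state_dict` above, while the sample keys are made up.

import re

def rename_key(name: str) -> str:
    name = re.sub(r"blocks\.(\d+)\.att", r"blocks.\1.attention", name)
    name = re.sub(r"blocks\.(\d+)\.ffn", r"blocks.\1.feed_forward", name)
    if name != "head.weight":
        name = "rwkv." + name
    return name

for key in ["blocks.3.att.key.weight", "blocks.0.ffn.value.weight", "head.weight"]:
    print(key, "->", rename_key(key))
# blocks.3.att.key.weight -> rwkv.blocks.3.attention.key.weight
# blocks.0.ffn.value.weight -> rwkv.blocks.0.feed_forward.value.weight
# head.weight -> head.weight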