from __future__ import annotations
from collections.abc import Iterator
class Node:
    def __init__(self, value: int) -> None:
        self.value = value
        self.left: Node | None = None
        self.right: Node | None = None


class BinaryTreeNodeSum:
    def __init__(self, tree: Node) -> None:
        self.tree = tree

    def depth_first_search(self, node: Node | None) -> int:
        if node is None:
            return 0
        return node.value + (
            self.depth_first_search(node.left) + self.depth_first_search(node.right)
        )

    def __iter__(self) -> Iterator[int]:
        yield self.depth_first_search(self.tree)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
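
# A quick usage sketch (illustrative; the name BinaryTreeNodeSum above is
# reconstructed from usage, so adjust if your copy of the file differs):
#
#     root = Node(10)
#     root.left = Node(5)
#     root.right = Node(-3)
#     tree_sum = BinaryTreeNodeSum(root)
#     assert list(tree_sum) == [12]  # 10 + 5 + (-3)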
DIGITS_SQUARED = [sum(int(c) ** 2 for c in str(i)) for i in range(100_000)]


def next_number(number: int) -> int:
    """Return the sum of the squares of the digits of ``number``."""
    sum_of_digits_squared = 0
    while number:
        # Increased speed slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 100_000]
        number //= 100_000

    return sum_of_digits_squared


# There are 2 chains: one ends with 89, where chain member 58 is the one which,
# when declared first, gives the least number of iterations for all the members
# to be checked; the other ends with 1 and has only one element, 1.
# So 58 and 1 are chosen to be declared at the start.
# Changed dictionary to an array to quicken the solution.
CHAINS: list[bool | None] = [None] * 10_000_000
CHAINS[0] = True  # the chain of 1 ends with 1
CHAINS[57] = False  # the chain of 58 ends with 89


def chain(number: int) -> bool:
    """Return True if the squared-digit chain of ``number`` ends with 1, False if 89."""
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore

    number_chain = chain(next_number(number))
    CHAINS[number - 1] = number_chain

    # Appending zeros (multiplying by 10) does not change the chain result, so cache those too.
    while number < 10_000_000:
        CHAINS[number - 1] = number_chain
        number *= 10

    return number_chain


def solution(number: int = 10_000_000) -> int:
    """Count the starting numbers below ``number`` whose chain arrives at 89."""
    for i in range(1, number):
        if CHAINS[i] is None:
            chain(i + 1)

    return CHAINS[:number].count(False)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f"{solution() = }")
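
# Worked example (illustrative): the squared-digit chain of 44 runs
# 44 -> 32 -> 13 -> 10 -> 1, so chain(44) is True, while 85 -> 89 -> 145 -> ...
# re-enters 89, so chain(85) is False and 85 counts toward solution().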
from __future__ import annotations
def prime_factors(n: int) -> list[int]:
    """Return the prime factors of ``n`` in ascending order."""
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors


if __name__ == "__main__":
    import doctest

    doctest.testmod()
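
# Example (illustrative): prime_factors(360) == [2, 2, 2, 3, 3, 5], since
# 360 = 2**3 * 3**2 * 5. Trial division only needs to run while i * i <= n:
# once i exceeds sqrt(n), whatever remains of n must itself be prime, which is
# what the final `if n > 1` branch captures.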
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tensorflow_text_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {
    "configuration_bert": ["BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BertConfig", "BertOnnxConfig"],
    "tokenization_bert": ["BasicTokenizer", "BertTokenizer", "WordpieceTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_bert_fast"] = ["BertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_bert"] = [
        "BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BertForMaskedLM",
        "BertForMultipleChoice",
        "BertForNextSentencePrediction",
        "BertForPreTraining",
        "BertForQuestionAnswering",
        "BertForSequenceClassification",
        "BertForTokenClassification",
        "BertLayer",
        "BertLMHeadModel",
        "BertModel",
        "BertPreTrainedModel",
        "load_tf_weights_in_bert",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_bert"] = [
        "TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFBertEmbeddings",
        "TFBertForMaskedLM",
        "TFBertForMultipleChoice",
        "TFBertForNextSentencePrediction",
        "TFBertForPreTraining",
        "TFBertForQuestionAnswering",
        "TFBertForSequenceClassification",
        "TFBertForTokenClassification",
        "TFBertLMHeadModel",
        "TFBertMainLayer",
        "TFBertModel",
        "TFBertPreTrainedModel",
    ]

try:
    if not is_tensorflow_text_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_bert_tf"] = ["TFBertTokenizer"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_bert"] = [
        "FlaxBertForCausalLM",
        "FlaxBertForMaskedLM",
        "FlaxBertForMultipleChoice",
        "FlaxBertForNextSentencePrediction",
        "FlaxBertForPreTraining",
        "FlaxBertForQuestionAnswering",
        "FlaxBertForSequenceClassification",
        "FlaxBertForTokenClassification",
        "FlaxBertModel",
        "FlaxBertPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig
    from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bert_fast import BertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_bert import (
            BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BertForMaskedLM,
            BertForMultipleChoice,
            BertForNextSentencePrediction,
            BertForPreTraining,
            BertForQuestionAnswering,
            BertForSequenceClassification,
            BertForTokenClassification,
            BertLayer,
            BertLMHeadModel,
            BertModel,
            BertPreTrainedModel,
            load_tf_weights_in_bert,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_bert import (
            TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFBertEmbeddings,
            TFBertForMaskedLM,
            TFBertForMultipleChoice,
            TFBertForNextSentencePrediction,
            TFBertForPreTraining,
            TFBertForQuestionAnswering,
            TFBertForSequenceClassification,
            TFBertForTokenClassification,
            TFBertLMHeadModel,
            TFBertMainLayer,
            TFBertModel,
            TFBertPreTrainedModel,
        )

    try:
        if not is_tensorflow_text_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bert_tf import TFBertTokenizer

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_bert import (
            FlaxBertForCausalLM,
            FlaxBertForMaskedLM,
            FlaxBertForMultipleChoice,
            FlaxBertForNextSentencePrediction,
            FlaxBertForPreTraining,
            FlaxBertForQuestionAnswering,
            FlaxBertForSequenceClassification,
            FlaxBertForTokenClassification,
            FlaxBertModel,
            FlaxBertPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
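
# Behavior sketch (illustrative, not part of the file): with this pattern,
# importing the package is cheap because sys.modules[__name__] is swapped for a
# _LazyModule; heavy submodules load only on first attribute access, e.g.
#
#     from transformers.models.bert import BertConfig  # triggers configuration_bert only
#     config = BertConfig()                            # torch/tf/flax still not imported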
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class DummyBeamDataset(datasets.BeamBasedBuilder):
    """Dummy beam dataset."""

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"content": datasets.Value("string")}),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_dummy_examples()})]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)


class NestedBeamDataset(datasets.BeamBasedBuilder):
    """Dummy beam dataset with nested features."""

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})}),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_nested_examples()})
        ]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)


def get_test_dummy_examples():
    return [(i, {"content": content}) for i, content in enumerate(["foo", "bar", "foobar"])]


def get_test_nested_examples():
    return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["foo", "bar", "foobar"])]


class BeamBuilderTest(TestCase):
    @require_beam
    def test_download_and_prepare(self):
        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")
                )
            )
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_dummy_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_dummy_examples()[expected_num_examples - 1][1]
            )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset

    @require_beam
    def test_download_and_prepare_sharded(self):
        import apache_beam as beam

        original_write_parquet = beam.io.parquetio.WriteToParquet

        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            with patch("apache_beam.io.parquetio.WriteToParquet") as write_parquet_mock:
                write_parquet_mock.side_effect = partial(original_write_parquet, num_shards=2)
                builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00000-of-00002.arrow"
                    )
                )
            )
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00001-of-00002.arrow"
                    )
                )
            )
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            # Order is not preserved when sharding, so we just check that all the elements are there
            self.assertListEqual(sorted(dset["train"]["content"]), sorted(["foo", "bar", "foobar"]))
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset

    @require_beam
    def test_no_beam_options(self):
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir)
            self.assertRaises(datasets.builder.MissingBeamOptions, builder.download_and_prepare)

    @require_beam
    def test_nested_features(self):
        expected_num_examples = len(get_test_nested_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = NestedBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")
                )
            )
            self.assertDictEqual(
                builder.info.features, datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})})
            )
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_nested_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_nested_examples()[expected_num_examples - 1][1]
            )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset
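
# Note (illustrative): "DirectRunner" is Apache Beam's local in-process runner,
# which is what lets these tests run download_and_prepare() end to end without a
# distributed backend; constructing the builder with no beam_runner at all is
# exactly what the MissingBeamOptions test above exercises.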
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def simulated_annealing(
    search_prob,
    find_max: bool = True,
    max_x: float = math.inf,
    min_x: float = -math.inf,
    max_y: float = math.inf,
    min_y: float = -math.inf,
    visualization: bool = False,
    start_temperate: float = 100,
    rate_of_decrease: float = 0.01,
    threshold_temp: float = 1,
) -> Any:
    search_end = False
    current_state = search_prob
    current_temp = start_temperate
    scores = []
    iterations = 0
    best_state = None

    while not search_end:
        current_score = current_state.score()
        if best_state is None or current_score > best_state.score():
            best_state = current_state
        scores.append(current_score)
        iterations += 1
        next_state = None
        neighbors = current_state.get_neighbors()
        while (
            next_state is None and neighbors
        ):  # till we do not find a neighbor that we can move to
            index = random.randint(0, len(neighbors) - 1)  # picking a random neighbor
            picked_neighbor = neighbors.pop(index)
            change = picked_neighbor.score() - current_score
            if (
                picked_neighbor.x > max_x
                or picked_neighbor.x < min_x
                or picked_neighbor.y > max_y
                or picked_neighbor.y < min_y
            ):
                continue  # neighbor outside our bounds
            if not find_max:
                change = change * -1  # in case we are finding minimum
            if change > 0:  # improves the solution
                next_state = picked_neighbor
            else:
                probability = (math.e) ** (
                    change / current_temp
                )  # probability generation function
                if random.random() < probability:  # random number within probability
                    next_state = picked_neighbor
        current_temp = current_temp - (current_temp * rate_of_decrease)

        if current_temp < threshold_temp or next_state is None:
            # temperature below threshold, or could not find a suitable neighbor
            search_end = True
        else:
            current_state = next_state

    if visualization:
        from matplotlib import pyplot as plt

        plt.plot(range(iterations), scores)
        plt.xlabel("Iterations")
        plt.ylabel("Function values")
        plt.show()
    return best_state


if __name__ == "__main__":

    def test_f1(x, y):
        return (x**2) + (y**2)

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_min = simulated_annealing(
        prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f"and 50 > y > - 5 found via simulated annealing: {local_min.score()}"
    )

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_max = simulated_annealing(
        prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f"and 50 > y > - 5 found via simulated annealing: {local_max.score()}"
    )

    def test_f2(x, y):
        return (3 * x**2) - (6 * y)

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_min = simulated_annealing(prob, find_max=False, visualization=True)
    print(
        "The minimum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: "
        f"{local_min.score()}"
    )

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_max = simulated_annealing(prob, find_max=True, visualization=True)
    print(
        "The maximum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: "
        f"{local_max.score()}"
    )
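
# The acceptance test above is the Metropolis criterion: a worsening move with
# score change `change` < 0 is accepted with probability e^(change / current_temp).
# Illustrative numbers: for change = -2, the move is accepted with probability
# e^(-2/100) ~ 0.98 at temperature 100, but only e^(-2/1) ~ 0.14 near the
# threshold temperature of 1, so the search gradually freezes onto good states.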
import jax.numpy as jnp

from ...utils import logging
from ..t5.modeling_flax_t5 import FlaxT5EncoderModel, FlaxT5ForConditionalGeneration, FlaxT5Model
from .configuration_mt5 import MT5Config


logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = "T5Config"


def shift_tokens_right(input_ids: jnp.ndarray, pad_token_id: int, decoder_start_token_id: int) -> jnp.ndarray:
    """Shift input ids one token to the right."""
    shifted_input_ids = jnp.zeros_like(input_ids)
    shifted_input_ids = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1])
    shifted_input_ids = shifted_input_ids.at[:, 0].set(decoder_start_token_id)

    shifted_input_ids = jnp.where(shifted_input_ids == -100, pad_token_id, shifted_input_ids)
    return shifted_input_ids


class FlaxMT5Model(FlaxT5Model):
    model_type = "mt5"
    config_class = MT5Config


class FlaxMT5EncoderModel(FlaxT5EncoderModel):
    model_type = "mt5"
    config_class = MT5Config


class FlaxMT5ForConditionalGeneration(FlaxT5ForConditionalGeneration):
    model_type = "mt5"
    config_class = MT5Config
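
# shift_tokens_right in action (illustrative): for input_ids [[5, 6, 7]] with
# decoder_start_token_id = 0, the shifted result is [[0, 5, 6]]; any -100
# positions (the usual ignored-label marker) are replaced by pad_token_id so the
# decoder never sees -100 as a token id.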
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_nezha": ["NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP", "NezhaConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_nezha"] = [
        "NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "NezhaForNextSentencePrediction",
        "NezhaForMaskedLM",
        "NezhaForPreTraining",
        "NezhaForMultipleChoice",
        "NezhaForQuestionAnswering",
        "NezhaForSequenceClassification",
        "NezhaForTokenClassification",
        "NezhaModel",
        "NezhaPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_nezha import (
            NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
            NezhaForMaskedLM,
            NezhaForMultipleChoice,
            NezhaForNextSentencePrediction,
            NezhaForPreTraining,
            NezhaForQuestionAnswering,
            NezhaForSequenceClassification,
            NezhaForTokenClassification,
            NezhaModel,
            NezhaPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import unittest
from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/spiece.model")


@require_sentencepiece
@require_tokenizers
class AlbertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = AlbertTokenizer
    rust_tokenizer_class = AlbertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    test_sentencepiece_ignore_case = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text

    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "▁eloquent")
        self.assertEqual(len(vocab_keys), 30000)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 30000)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    def test_full_tokenizer(self):
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁this", "▁is", "▁a", "▁test"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [48, 25, 21, 1289])

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", "."]
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [31, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9])

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "."],
        )

    def test_sequence_builders(self):
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB)

        text = tokenizer.encode("sequence builders")
        text_2 = tokenizer.encode("multi-sequence build")

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [
            tokenizer.sep_token_id
        ]
@slow
def _lowercase( self ) -> Union[str, Any]:
# fmt: off
UpperCAmelCase : Union[str, Any] = {"""attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """input_ids""": [[2, 21970, 13, 5, 6092, 167, 28, 7103, 2153, 673, 8, 7028, 12051, 18, 17, 7103, 2153, 673, 8, 3515, 18684, 8, 4461, 6, 1927, 297, 8, 12060, 2607, 18, 13, 5, 4461, 15, 10538, 38, 8, 135, 15, 822, 58, 15, 993, 10363, 15, 1460, 8005, 4461, 15, 993, 255, 2328, 9, 9, 9, 6, 26, 1112, 816, 3260, 13, 5, 103, 2377, 6, 17, 1112, 816, 2782, 13, 5, 103, 10641, 6, 29, 84, 2512, 2430, 782, 18684, 2761, 19, 808, 2430, 2556, 17, 855, 1480, 9477, 4091, 128, 11712, 15, 7103, 2153, 673, 17, 24883, 9990, 9, 3], [2, 11502, 25, 1006, 20, 782, 8, 11809, 855, 1732, 19393, 18667, 37, 367, 21018, 69, 1854, 34, 11860, 19124, 27, 156, 225, 17, 193, 4141, 19, 65, 9124, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 2231, 886, 2385, 17659, 84, 14, 16792, 1952, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=A , model_name="""albert-base-v2""" , revision="""6b6560eaf5ff2e250b00c50f380c5389a9c2d82e""" , )
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
torch.backends.cuda.matmul.allow_tf32 = False


class VersatileDiffusionImageVariationPipelineFastTests(unittest.TestCase):
    pass


@slow
@require_torch_gpu
class VersatileDiffusionImageVariationPipelineIntegrationTests(unittest.TestCase):
    def test_inference_image_variations(self):
        pipe = VersatileDiffusionImageVariationPipeline.from_pretrained("shi-labs/versatile-diffusion")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        image_prompt = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe(
            image=image_prompt,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=50,
            output_type="numpy",
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "junnyu/roformer_chinese_small": "https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt",
        "junnyu/roformer_chinese_base": "https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt",
        "junnyu/roformer_chinese_char_small": (
            "https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt"
        ),
        "junnyu/roformer_chinese_char_base": (
            "https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt"
        ),
        "junnyu/roformer_small_discriminator": (
            "https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt"
        ),
        "junnyu/roformer_small_generator": (
            "https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "junnyu/roformer_chinese_small": 1536,
    "junnyu/roformer_chinese_base": 1536,
    "junnyu/roformer_chinese_char_small": 512,
    "junnyu/roformer_chinese_char_base": 512,
    "junnyu/roformer_small_discriminator": 128,
    "junnyu/roformer_small_generator": 128,
}

PRETRAINED_INIT_CONFIGURATION = {
    "junnyu/roformer_chinese_small": {"do_lower_case": True},
    "junnyu/roformer_chinese_base": {"do_lower_case": True},
    "junnyu/roformer_chinese_char_small": {"do_lower_case": True},
    "junnyu/roformer_chinese_char_base": {"do_lower_case": True},
    "junnyu/roformer_small_discriminator": {"do_lower_case": True},
    "junnyu/roformer_small_generator": {"do_lower_case": True},
}


class RoFormerTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RoFormerTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            pre_tok_state.get("lowercase", do_lower_case) != do_lower_case
            or pre_tok_state.get("strip_accents", strip_accents) != strip_accents
        ):
            pre_tok_class = getattr(normalizers, pre_tok_state.pop("type"))
            pre_tok_state["lowercase"] = do_lower_case
            pre_tok_state["strip_accents"] = strip_accents
            self.backend_tokenizer.normalizer = pre_tok_class(**pre_tok_state)

        self.do_lower_case = do_lower_case

    def __getstate__(self):
        state = self.__dict__.copy()
        state["_tokenizer"].pre_tokenizer = BertPreTokenizer()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        vocab = self.__dict__["_tokenizer"].get_vocab()
        self.__dict__["_tokenizer"].pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer(vocab))

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def save_pretrained(
        self,
        save_directory,
        legacy_format=None,
        filename_prefix=None,
        push_to_hub=False,
        **kwargs,
    ):
        self.backend_tokenizer.pre_tokenizer = BertPreTokenizer()
        return super().save_pretrained(save_directory, legacy_format, filename_prefix, push_to_hub, **kwargs)
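
# Why the __getstate__/__setstate__ pair above exists (illustrative note): a
# PreTokenizer.custom(...) wraps a Python object that cannot be pickled, so it
# is swapped for a plain BertPreTokenizer before pickling (and before saving)
# and re-attached on unpickling, e.g.
#
#     import pickle
#     tok = RoFormerTokenizerFast.from_pretrained("junnyu/roformer_chinese_base")
#     tok2 = pickle.loads(pickle.dumps(tok))  # Jieba pre-tokenizer restored in __setstate__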
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
    import jax
    import jaxlib

logger = get_logger()

DEVICE_MAPPING: Optional[dict] = None


class JaxFormatter(TensorFormatter[Mapping, "jax.Array", Mapping]):
    def __init__(self, features=None, device=None, **jnp_array_kwargs):
        super().__init__(features=features)
        import jax
        from jaxlib.xla_client import Device

        if isinstance(device, Device):
            raise ValueError(
                f"Expected {device} to be a `str` not {type(device)}, as `jaxlib.xla_extension.Device` "
                "is not serializable neither with `pickle` nor with `dill`. Instead you can surround "
                "the device with `str()` to get its string identifier that will be internally mapped "
                "to the actual `jaxlib.xla_extension.Device`."
            )
        self.device = device if isinstance(device, str) else str(jax.devices()[0])
        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()
        if self.device not in list(DEVICE_MAPPING.keys()):
            logger.warning(
                f"Device with string identifier {self.device} not listed among the available "
                f"devices: {list(DEVICE_MAPPING.keys())}, so falling back to the default "
                f"device: {str(jax.devices()[0])}."
            )
            self.device = str(jax.devices()[0])
        self.jnp_array_kwargs = jnp_array_kwargs

    @staticmethod
    def _map_devices_to_str() -> Dict[str, "jaxlib.xla_extension.Device"]:
        import jax

        return {str(device): device for device in jax.devices()}

    def _consolidate(self, column):
        import jax
        import jax.numpy as jnp

        if isinstance(column, list) and column:
            if all(
                isinstance(x, jax.Array) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column
            ):
                return jnp.stack(column, axis=0)
        return column

    def _tensorize(self, value):
        import jax
        import jax.numpy as jnp

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}

        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            # the default int precision depends on the jax config
            # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
            if jax.config.jax_enable_x64:
                default_dtype = {"dtype": jnp.int64}
            else:
                default_dtype = {"dtype": jnp.int32}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": jnp.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)

        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()

        with jax.default_device(DEVICE_MAPPING[self.device]):
            # calling jnp.array on a np.ndarray does copy the data
            # see https://github.com/google/jax/issues/4486
            return jnp.array(value, **{**default_dtype, **self.jnp_array_kwargs})

    def _recursive_tensorize(self, data_struct):
        import jax

        # support for torch, tf, jax etc.
        if config.TORCH_AVAILABLE and "torch" in sys.modules:
            import torch

            if isinstance(data_struct, torch.Tensor):
                return self._tensorize(data_struct.detach().cpu().numpy()[()])
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, jax.Array):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # jax arrays cannot be instantiated from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct: dict):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table: pa.Table) -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: pa.Table) -> "jax.Array":
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: pa.Table) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
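
# Typical entry point (illustrative sketch; the dataset name is an assumption):
#
#     from datasets import load_dataset
#     ds = load_dataset("rotten_tomatoes", split="train").with_format("jax")
#     row = ds[0]  # values arrive as jax.Array via JaxFormatter.format_row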
import argparse
import torch
from transformers import (
    SpeechT5Config,
    SpeechT5FeatureExtractor,
    SpeechT5ForSpeechToSpeech,
    SpeechT5ForSpeechToText,
    SpeechT5ForTextToSpeech,
    SpeechT5Processor,
    SpeechT5Tokenizer,
    logging,
)
from transformers.tokenization_utils import AddedToken
logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.speecht5")

MAPPING_SPEECH_ENCODER_PRENET = {
"""speech_encoder_prenet.layer_norm""": """speecht5.encoder.prenet.feature_projection.layer_norm""",
"""speech_encoder_prenet.post_extract_proj""": """speecht5.encoder.prenet.feature_projection.projection""",
"""speech_encoder_prenet.pos_conv.0""": """speecht5.encoder.prenet.pos_conv_embed.conv""",
"""speech_encoder_prenet.mask_emb""": """speecht5.encoder.prenet.masked_spec_embed""",
}

MAPPING_TEXT_ENCODER_PRENET = {
"""text_encoder_prenet.encoder_prenet.0""": """speecht5.encoder.prenet.embed_tokens""",
"""text_encoder_prenet.encoder_prenet.1.alpha""": """speecht5.encoder.prenet.encode_positions.alpha""",
}

MAPPING_SPEECH_DECODER_PRENET = {
"""speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0""": """speecht5.decoder.prenet.layers.0""",
"""speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0""": """speecht5.decoder.prenet.layers.1""",
"""speech_decoder_prenet.decoder_prenet.0.1""": """speecht5.decoder.prenet.final_layer""",
"""speech_decoder_prenet.decoder_prenet.1.alpha""": """speecht5.decoder.prenet.encode_positions.alpha""",
"""speech_decoder_prenet.spkembs_layer.0""": """speecht5.decoder.prenet.speaker_embeds_layer""",
}

MAPPING_SPEECH_DECODER_POSTNET = {
"""speech_decoder_postnet.feat_out""": """speech_decoder_postnet.feat_out""",
"""speech_decoder_postnet.prob_out""": """speech_decoder_postnet.prob_out""",
"""speech_decoder_postnet.postnet.postnet.0.0""": """speech_decoder_postnet.layers.0.conv""",
"""speech_decoder_postnet.postnet.postnet.0.1""": """speech_decoder_postnet.layers.0.batch_norm""",
"""speech_decoder_postnet.postnet.postnet.1.0""": """speech_decoder_postnet.layers.1.conv""",
"""speech_decoder_postnet.postnet.postnet.1.1""": """speech_decoder_postnet.layers.1.batch_norm""",
"""speech_decoder_postnet.postnet.postnet.2.0""": """speech_decoder_postnet.layers.2.conv""",
"""speech_decoder_postnet.postnet.postnet.2.1""": """speech_decoder_postnet.layers.2.batch_norm""",
"""speech_decoder_postnet.postnet.postnet.3.0""": """speech_decoder_postnet.layers.3.conv""",
"""speech_decoder_postnet.postnet.postnet.3.1""": """speech_decoder_postnet.layers.3.batch_norm""",
"""speech_decoder_postnet.postnet.postnet.4.0""": """speech_decoder_postnet.layers.4.conv""",
"""speech_decoder_postnet.postnet.postnet.4.1""": """speech_decoder_postnet.layers.4.batch_norm""",
}

MAPPING_TEXT_DECODER_PRENET = {
"""text_decoder_prenet.embed_tokens""": """speecht5.decoder.prenet.embed_tokens""",
}

MAPPING_TEXT_DECODER_POSTNET = {
"""text_decoder_postnet.output_projection""": """text_decoder_postnet.lm_head""",
}

MAPPING_ENCODER = {
"""encoder.layers.*.self_attn.k_proj""": """speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj""",
"""encoder.layers.*.self_attn.v_proj""": """speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj""",
"""encoder.layers.*.self_attn.q_proj""": """speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj""",
"""encoder.layers.*.self_attn.out_proj""": """speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj""",
"""encoder.layers.*.self_attn_layer_norm""": """speecht5.encoder.wrapped_encoder.layers.*.layer_norm""",
"""encoder.layers.*.fc1""": """speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense""",
"""encoder.layers.*.fc2""": """speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense""",
"""encoder.layers.*.final_layer_norm""": """speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """speecht5.encoder.wrapped_encoder.layer_norm""",
"""encoder.pos_emb.pe_k""": """speecht5.encoder.wrapped_encoder.embed_positions.pe_k""",
}

MAPPING_DECODER = {
"""decoder.layers.*.self_attn.k_proj""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj""",
"""decoder.layers.*.self_attn.v_proj""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj""",
"""decoder.layers.*.self_attn.q_proj""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj""",
"""decoder.layers.*.self_attn.out_proj""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj""",
"""decoder.layers.*.self_attn_layer_norm""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm""",
"""decoder.layers.*.encoder_attn.k_proj""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj""",
"""decoder.layers.*.encoder_attn.v_proj""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj""",
"""decoder.layers.*.encoder_attn.q_proj""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj""",
"""decoder.layers.*.encoder_attn.out_proj""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj""",
"""decoder.layers.*.encoder_attn_layer_norm""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm""",
"""decoder.layers.*.fc1""": """speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense""",
"""decoder.layers.*.fc2""": """speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense""",
"""decoder.layers.*.final_layer_norm""": """speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm""",
}

MAPPING_S2T = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_TEXT_DECODER_PRENET,
**MAPPING_TEXT_DECODER_POSTNET,
}

MAPPING_T2S = {
**MAPPING_TEXT_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}

MAPPING_S2S = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}

TOP_LEVEL_KEYS = []
IGNORE_KEYS = [
"""encoder.version""",
"""encoder.layers.*.norm_k.weight""",
"""encoder.layers.*.norm_k.bias""",
"""decoder.version""",
"""decoder.layers.*.norm_k.weight""",
"""decoder.layers.*.norm_k.bias""",
"""decoder.pos_emb.pe_k""",
"""speech_encoder_prenet.embed_positions._float_tensor""",
"""text_decoder_prenet.embed_positions._float_tensor""",
]

IGNORE_KEYS_S2T = IGNORE_KEYS + [
"""encoder.proj""",
"""text_encoder_prenet.*""",
"""speech_decoder_prenet.*""",
"""speech_decoder_postnet.*""",
]

IGNORE_KEYS_T2S = IGNORE_KEYS + [
"""encoder.proj""",
"""speech_encoder_prenet.*""",
"""text_decoder_prenet.*""",
"""text_decoder_postnet.*""",
]

IGNORE_KEYS_S2S = IGNORE_KEYS + [
"""encoder.proj""",
"""text_encoder_prenet.*""",
"""text_decoder_prenet.*""",
"""text_decoder_postnet.*""",
]

def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.")

def should_ignore(name, ignore_keys):
    for key in ignore_keys:
        if key.endswith(".*"):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(".*.")
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False

def recursively_load_weights(fairseq_dict, hf_model, task):
    unused_weights = []

    if task == "s2t":
        feature_encoder = hf_model.speecht5.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2T
        IGNORE_KEYS = IGNORE_KEYS_S2T
    elif task == "t2s":
        feature_encoder = None
        MAPPING = MAPPING_T2S
        IGNORE_KEYS = IGNORE_KEYS_T2S
    elif task == "s2s":
        feature_encoder = hf_model.speecht5.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2S
        IGNORE_KEYS = IGNORE_KEYS_S2S
    else:
        raise ValueError(f"Unsupported task: {task}")

    for name, value in fairseq_dict.items():
        if should_ignore(name, IGNORE_KEYS):
            logger.info(f"{name} was ignored")
            continue

        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_encoder,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                # mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if "*" in key:
                    prefix, suffix = key.split(".*.")
                    if prefix in name and suffix in name:
                        key = suffix

                # if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                if key in name:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "running_mean" in name:
                        weight_type = "running_mean"
                    elif "running_var" in name:
                        weight_type = "running_var"
                    elif "num_batches_tracked" in name:
                        weight_type = "num_batches_tracked"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue

        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")

def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)

@torch.no_grad()
def convert_speecht5_checkpoint(
    task,
    checkpoint_path,
    pytorch_dump_folder_path,
    config_path=None,
    vocab_path=None,
    repo_id=None,
):
    if config_path is not None:
        config = SpeechT5Config.from_pretrained(config_path)
    else:
        config = SpeechT5Config()

    if task == "s2t":
        config.max_length = config.max_text_positions
        model = SpeechT5ForSpeechToText(config)
    elif task == "t2s":
        config.max_speech_positions = 1876
        config.max_text_positions = 600
        config.max_length = config.max_speech_positions
        model = SpeechT5ForTextToSpeech(config)
    elif task == "s2s":
        config.max_speech_positions = 1876
        config.max_length = config.max_speech_positions
        model = SpeechT5ForSpeechToSpeech(config)
    else:
        raise ValueError(f"Unknown task name: {task}")

    if vocab_path:
        tokenizer = SpeechT5Tokenizer(vocab_path, model_max_length=config.max_text_positions)

        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken("<mask>", lstrip=True, rstrip=False)
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({"mask_token": mask_token})
        tokenizer.add_tokens(["<ctc_blank>"])

    feature_extractor = SpeechT5FeatureExtractor()
    processor = SpeechT5Processor(tokenizer=tokenizer, feature_extractor=feature_extractor)
    processor.save_pretrained(pytorch_dump_folder_path)

    fairseq_checkpoint = torch.load(checkpoint_path)
    recursively_load_weights(fairseq_checkpoint["model"], model, task)

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
a : List[Any] = argparse.ArgumentParser()
parser.add_argument(
"""--task""",
default="""s2t""",
type=str,
help="""Type of the SpeechT5 model you'd like to convert. Should be one of 's2t', 't2s', 's2s'.""",
)
parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--vocab_path""", default=None, type=str, help="""Path to SentencePiece model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
a : Tuple = parser.parse_args()
convert_speechta_checkpoint(
args.task,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.vocab_path,
args.push_to_hub,
)
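
# Example invocation (hypothetical paths and script name):
#
#     python convert_speecht5_checkpoint.py --task s2t \
#         --checkpoint_path ./speecht5_asr.pt \
#         --vocab_path ./spm_char.model \
#         --pytorch_dump_folder_path ./speecht5_asr_converted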
import os
from datetime import datetime as dt

from github import Github


LABELS_TO_EXEMPT = [
    "good first issue",
    "good second issue",
    "good difficult issue",
    "feature request",
    "new model",
    "wip",
]


def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/transformers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted(issue.get_comments(), key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and (dt.utcnow() - issue.updated_at).days > 7
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
            issue.edit(state="closed")
        elif (
            (dt.utcnow() - issue.updated_at).days > 23
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # print(f"Would add stale comment to {issue.number}")
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored."
            )


if __name__ == "__main__":
    main()
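
# Timing summary (illustrative): an issue must be at least 30 days old to be
# touched at all; after 23 days without updates it gets the stale comment, and
# once the bot's comment has sat for 7 more days without activity the issue is
# closed, unless it carries one of the LABELS_TO_EXEMPT labels.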
import os
import re
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model",
        "google/bigbird-roberta-large": (
            "https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"
        ),
        "google/bigbird-base-trivia-itc": (
            "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/bigbird-roberta-base": 4096,
    "google/bigbird-roberta-large": 4096,
    "google/bigbird-base-trivia-itc": 4096,
}


class BigBirdTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []
def __init__( self , A , A="<unk>" , A="<s>" , A="</s>" , A="<pad>" , A="[SEP]" , A="[MASK]" , A="[CLS]" , A = None , **A , ) -> None:
UpperCAmelCase : int = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else bos_token
UpperCAmelCase : Dict = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else eos_token
UpperCAmelCase : Optional[Any] = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else unk_token
UpperCAmelCase : Union[str, Any] = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else pad_token
UpperCAmelCase : Any = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else cls_token
UpperCAmelCase : Tuple = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else sep_token
# Mask token behave like a normal word, i.e. include the space before it
UpperCAmelCase : Union[str, Any] = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else mask_token
UpperCAmelCase : int = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=A , eos_token=A , unk_token=A , pad_token=A , sep_token=A , mask_token=A , cls_token=A , sp_model_kwargs=self.sp_model_kwargs , **A , )
UpperCAmelCase : Union[str, Any] = vocab_file
UpperCAmelCase : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(A )
@property
def _lowercase( self ) -> Optional[int]:
return self.sp_model.get_piece_size()
def _lowercase( self ) -> List[str]:
UpperCAmelCase : int = {self.convert_ids_to_tokens(A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> Optional[Any]:
UpperCAmelCase : int = self.__dict__.copy()
UpperCAmelCase : Union[str, Any] = None
return state
def __setstate__( self , A ) -> int:
UpperCAmelCase : Optional[Any] = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
UpperCAmelCase : int = {}
UpperCAmelCase : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _lowercase( self , A ) -> List[str]:
return self.sp_model.encode(A , out_type=A )
def _lowercase( self , A ) -> Union[str, Any]:
return self.sp_model.piece_to_id(A )
def _lowercase( self , A ) -> int:
UpperCAmelCase : Dict = self.sp_model.IdToPiece(A )
return token
def _lowercase( self , A ) -> List[Any]:
UpperCAmelCase : int = []
UpperCAmelCase : int = """"""
UpperCAmelCase : Dict = False
for token in tokens:
            # make sure that special tokens are not decoded using the sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(A ) + token
UpperCAmelCase : List[Any] = True
UpperCAmelCase : Optional[int] = []
else:
current_sub_tokens.append(A )
UpperCAmelCase : Dict = False
out_string += self.sp_model.decode(A )
return out_string.strip()
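    # Decoding sketch for the method above: regular pieces are buffered and decoded
    # through the sentencepiece model, while special tokens are emitted verbatim;
    # the result is whitespace-stripped at the end.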
def _lowercase( self , A , A = False , A = None , A = True , **A , ) -> str:
UpperCAmelCase : Tuple = kwargs.pop("""use_source_tokenizer""" , A )
UpperCAmelCase : str = self.convert_ids_to_tokens(A , skip_special_tokens=A )
        # To avoid mixing byte-level and unicode for byte-level BPE
        # we need to build the string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
UpperCAmelCase : Tuple = []
UpperCAmelCase : Dict = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(A ) )
UpperCAmelCase : int = []
sub_texts.append(A )
else:
current_sub_text.append(A )
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(A ) )
# Mimic the behavior of the Rust tokenizer:
# No space before [MASK] and [SEP]
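        # e.g. "7 [MASK] tokens" -> "7[MASK] tokens" (illustrative)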
if spaces_between_special_tokens:
UpperCAmelCase : List[Any] = re.sub(r""" (\[(MASK|SEP)\])""" , r"""\1""" , """ """.join(A ) )
else:
UpperCAmelCase : str = """""".join(A )
UpperCAmelCase : List[str] = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
UpperCAmelCase : List[Any] = self.clean_up_tokenization(A )
return clean_text
else:
return text
def _lowercase( self , A , A = None ) -> Tuple[str]:
if not os.path.isdir(A ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
UpperCAmelCase : Any = os.path.join(
A , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , A )
elif not os.path.isfile(self.vocab_file ):
with open(A , """wb""" ) as fi:
UpperCAmelCase : Tuple = self.sp_model.serialized_model_proto()
fi.write(A )
return (out_vocab_file,)
def _lowercase( self , A , A = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
UpperCAmelCase : int = [self.cls_token_id]
UpperCAmelCase : List[Any] = [self.sep_token_id]
return cls + token_ids_a + sep + token_ids_a + sep
def _lowercase( self , A , A = None , A = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=A , token_ids_a=A , already_has_special_tokens=A )
if token_ids_a is None:
return [1] + ([0] * len(A )) + [1]
return [1] + ([0] * len(A )) + [1] + ([0] * len(A )) + [1]
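    # Illustrative mask layout: for a single 3-token sequence the result is
    # [1, 0, 0, 0, 1], i.e. 1 marks the added special tokens and 0 the regular tokens.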
def _lowercase( self , A , A = None ) -> List[int]:
UpperCAmelCase : int = [self.sep_token_id]
UpperCAmelCase : Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
| 721 |
'''simple docstring'''
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class UpperCamelCase_ :
def __init__( self , A , A=13 , A=7 , A=True , A=True , A=False , A=True , A=99 , A=32 , A=5 , A=4 , A=37 , A="gelu" , A=0.1 , A=0.1 , A=512 , A=16 , A=2 , A=0.0_2 , A=3 , A=4 , A=None , ) -> Any:
UpperCAmelCase : Optional[int] = parent
UpperCAmelCase : List[Any] = batch_size
UpperCAmelCase : Union[str, Any] = seq_length
UpperCAmelCase : Optional[int] = is_training
UpperCAmelCase : str = use_input_mask
UpperCAmelCase : Optional[int] = use_token_type_ids
UpperCAmelCase : Dict = use_labels
UpperCAmelCase : str = vocab_size
UpperCAmelCase : Optional[int] = hidden_size
UpperCAmelCase : str = num_hidden_layers
UpperCAmelCase : Any = num_attention_heads
UpperCAmelCase : Union[str, Any] = intermediate_size
UpperCAmelCase : str = hidden_act
UpperCAmelCase : Any = hidden_dropout_prob
UpperCAmelCase : str = attention_probs_dropout_prob
UpperCAmelCase : Tuple = max_position_embeddings
UpperCAmelCase : Optional[Any] = type_vocab_size
UpperCAmelCase : Optional[Any] = type_sequence_label_size
UpperCAmelCase : str = initializer_range
UpperCAmelCase : List[Any] = num_labels
UpperCAmelCase : Dict = num_choices
UpperCAmelCase : Tuple = scope
def _lowercase( self ) -> Dict:
UpperCAmelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase : Optional[Any] = None
if self.use_input_mask:
UpperCAmelCase : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase : Optional[Any] = None
if self.use_token_type_ids:
UpperCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCAmelCase : Union[str, Any] = None
UpperCAmelCase : Dict = None
UpperCAmelCase : Union[str, Any] = None
if self.use_labels:
UpperCAmelCase : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase : int = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowercase( self ) -> Dict:
return OpenLlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=A , initializer_range=self.initializer_range , use_stable_embedding=A , )
def _lowercase( self , A , A , A , A , A , A , A ) -> str:
UpperCAmelCase : Union[str, Any] = OpenLlamaModel(config=A )
model.to(A )
model.eval()
UpperCAmelCase : Dict = model(A , attention_mask=A )
UpperCAmelCase : Optional[int] = model(A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowercase( self , A , A , A , A , A , A , A , A , A , ) -> List[Any]:
UpperCAmelCase : Optional[int] = True
UpperCAmelCase : Union[str, Any] = OpenLlamaModel(A )
model.to(A )
model.eval()
UpperCAmelCase : List[Any] = model(
A , attention_mask=A , encoder_hidden_states=A , encoder_attention_mask=A , )
UpperCAmelCase : str = model(
A , attention_mask=A , encoder_hidden_states=A , )
UpperCAmelCase : List[Any] = model(A , attention_mask=A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowercase( self , A , A , A , A , A , A , A , A , A , ) -> int:
UpperCAmelCase : Optional[int] = OpenLlamaForCausalLM(config=A )
model.to(A )
model.eval()
UpperCAmelCase : Tuple = model(A , attention_mask=A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowercase( self , A , A , A , A , A , A , A , A , A , ) -> int:
UpperCAmelCase : Dict = True
UpperCAmelCase : Tuple = True
UpperCAmelCase : str = OpenLlamaForCausalLM(config=A )
model.to(A )
model.eval()
# first forward pass
UpperCAmelCase : Union[str, Any] = model(
A , attention_mask=A , encoder_hidden_states=A , encoder_attention_mask=A , use_cache=A , )
UpperCAmelCase : str = outputs.past_key_values
        # create hypothetical multiple next tokens and extend next_input_ids
UpperCAmelCase : Tuple = ids_tensor((self.batch_size, 3) , config.vocab_size )
UpperCAmelCase : Tuple = ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append to next input_ids and attention mask
UpperCAmelCase : List[str] = torch.cat([input_ids, next_tokens] , dim=-1 )
UpperCAmelCase : List[Any] = torch.cat([input_mask, next_mask] , dim=-1 )
UpperCAmelCase : List[Any] = model(
A , attention_mask=A , encoder_hidden_states=A , encoder_attention_mask=A , output_hidden_states=A , )["""hidden_states"""][0]
UpperCAmelCase : Optional[Any] = model(
A , attention_mask=A , encoder_hidden_states=A , encoder_attention_mask=A , past_key_values=A , output_hidden_states=A , )["""hidden_states"""][0]
# select random slice
UpperCAmelCase : str = ids_tensor((1,) , output_from_past.shape[-1] ).item()
UpperCAmelCase : Any = output_from_no_past[:, -3:, random_slice_idx].detach()
UpperCAmelCase : Dict = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(A , A , atol=1e-3 ) )
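        # The assertion above verifies that decoding with a KV cache (past_key_values)
        # reproduces the hidden states of a full re-run over the extended sequence.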
def _lowercase( self ) -> Union[str, Any]:
UpperCAmelCase : Union[str, Any] = self.prepare_config_and_inputs()
        UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Tuple = config_and_inputs
UpperCAmelCase : str = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class UpperCamelCase_ ( __magic_name__ , __magic_name__ , __magic_name__ , unittest.TestCase ):
lowercase = (
(OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
)
lowercase = (OpenLlamaForCausalLM,) if is_torch_available() else ()
lowercase = (
{
'feature-extraction': OpenLlamaModel,
'text-classification': OpenLlamaForSequenceClassification,
'text-generation': OpenLlamaForCausalLM,
'zero-shot': OpenLlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
lowercase = False
lowercase = False
def _lowercase( self ) -> Tuple:
UpperCAmelCase : Dict = OpenLlamaModelTester(self )
UpperCAmelCase : List[str] = ConfigTester(self , config_class=A , hidden_size=37 )
def _lowercase( self ) -> Optional[Any]:
self.config_tester.run_common_tests()
def _lowercase( self ) -> int:
UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A )
def _lowercase( self ) -> str:
UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
UpperCAmelCase : int = type
self.model_tester.create_and_check_model(*A )
def _lowercase( self ) -> str:
UpperCAmelCase , UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase : List[str] = 3
UpperCAmelCase : Optional[Any] = input_dict["""input_ids"""]
UpperCAmelCase : str = input_ids.ne(1 ).to(A )
UpperCAmelCase : Tuple = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
UpperCAmelCase : Optional[Any] = OpenLlamaForSequenceClassification(A )
model.to(A )
model.eval()
UpperCAmelCase : List[Any] = model(A , attention_mask=A , labels=A )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def _lowercase( self ) -> Union[str, Any]:
UpperCAmelCase , UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase : Dict = 3
UpperCAmelCase : Any = """single_label_classification"""
UpperCAmelCase : Dict = input_dict["""input_ids"""]
UpperCAmelCase : Optional[Any] = input_ids.ne(1 ).to(A )
UpperCAmelCase : str = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
UpperCAmelCase : Optional[Any] = OpenLlamaForSequenceClassification(A )
model.to(A )
model.eval()
UpperCAmelCase : Tuple = model(A , attention_mask=A , labels=A )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def _lowercase( self ) -> int:
UpperCAmelCase , UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase : Tuple = 3
UpperCAmelCase : Optional[Any] = """multi_label_classification"""
UpperCAmelCase : Dict = input_dict["""input_ids"""]
UpperCAmelCase : int = input_ids.ne(1 ).to(A )
UpperCAmelCase : int = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
UpperCAmelCase : Any = OpenLlamaForSequenceClassification(A )
model.to(A )
model.eval()
UpperCAmelCase : Dict = model(A , attention_mask=A , labels=A )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip("""Open-Llama buffers include complex numbers, which breaks this test""" )
def _lowercase( self ) -> Dict:
pass
@parameterized.expand([("""linear""",), ("""dynamic""",)] )
def _lowercase( self , A ) -> str:
UpperCAmelCase , UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase : Dict = ids_tensor([1, 10] , config.vocab_size )
UpperCAmelCase : List[str] = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
UpperCAmelCase : Any = OpenLlamaModel(A )
original_model.to(A )
original_model.eval()
UpperCAmelCase : List[str] = original_model(A ).last_hidden_state
UpperCAmelCase : List[Any] = original_model(A ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
UpperCAmelCase : Union[str, Any] = {"""type""": scaling_type, """factor""": 1_0.0}
UpperCAmelCase : str = OpenLlamaModel(A )
scaled_model.to(A )
scaled_model.eval()
UpperCAmelCase : List[str] = scaled_model(A ).last_hidden_state
UpperCAmelCase : Optional[int] = scaled_model(A ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(A , A , atol=1e-5 ) )
else:
self.assertFalse(torch.allclose(A , A , atol=1e-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(A , A , atol=1e-5 ) )
| 672 | 0 |
import random
def __lowerCAmelCase ( _UpperCamelCase : int , _UpperCamelCase : float , _UpperCamelCase : bool = False ) -> dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE = {i: [] for i in range(_UpperCamelCase )}
    # if probability is greater than or equal to 1, then generate a complete graph
if probability >= 1:
return complete_graph(_UpperCamelCase )
    # if probability is less than or equal to 0, then return a graph without edges
if probability <= 0:
return graph
    # for each pair of nodes (i, j), add an edge from i to j
    # if the randomly generated number is smaller than probability
for i in range(_UpperCamelCase ):
for j in range(i + 1 , _UpperCamelCase ):
if random.random() < probability:
graph[i].append(_UpperCamelCase )
if not directed:
                    # if the graph is undirected, also add the reverse edge from j to i
graph[j].append(_UpperCamelCase )
return graph
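# Illustrative output (intended behavior): with 3 nodes and probability 1 the
# generator degenerates to the complete graph, e.g. {0: [1, 2], 1: [0, 2], 2: [0, 1]}.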
def __lowerCAmelCase ( _UpperCamelCase : int ) -> dict:
'''simple docstring'''
return {
i: [j for j in range(_UpperCamelCase ) if i != j] for i in range(_UpperCamelCase )
}
if __name__ == "__main__":
import doctest
doctest.testmod()
| 673 |
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
a_ : Optional[int] = logging.get_logger(__name__)
a_ : Union[str, Any] = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
a_ : Any = {
"vocab_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
},
"merges_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
},
"tokenizer_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
},
}
a_ : List[Any] = {
"allenai/led-base-16384": 1_6384,
}
class UpperCamelCase ( SCREAMING_SNAKE_CASE ):
__UpperCamelCase =VOCAB_FILES_NAMES
__UpperCamelCase =PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase =LEDTokenizer
__UpperCamelCase =["input_ids", "attention_mask"]
def __init__( self : Tuple , snake_case__ : List[Any]=None , snake_case__ : List[str]=None , snake_case__ : List[str]=None , snake_case__ : Dict="replace" , snake_case__ : Tuple="<s>" , snake_case__ : Optional[Any]="</s>" , snake_case__ : int="</s>" , snake_case__ : Dict="<s>" , snake_case__ : Union[str, Any]="<unk>" , snake_case__ : Optional[int]="<pad>" , snake_case__ : List[str]="<mask>" , snake_case__ : List[Any]=False , snake_case__ : int=True , **snake_case__ : Dict , ):
"""simple docstring"""
super().__init__(
snake_case__ , snake_case__ , tokenizer_file=snake_case__ , errors=snake_case__ , bos_token=snake_case__ , eos_token=snake_case__ , sep_token=snake_case__ , cls_token=snake_case__ , unk_token=snake_case__ , pad_token=snake_case__ , mask_token=snake_case__ , add_prefix_space=snake_case__ , trim_offsets=snake_case__ , **snake_case__ , )
SCREAMING_SNAKE_CASE = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space' , snake_case__ ) != add_prefix_space:
SCREAMING_SNAKE_CASE = getattr(snake_case__ , pre_tok_state.pop('type' ) )
SCREAMING_SNAKE_CASE = add_prefix_space
SCREAMING_SNAKE_CASE = pre_tok_class(**snake_case__ )
SCREAMING_SNAKE_CASE = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
SCREAMING_SNAKE_CASE = 'post_processor'
SCREAMING_SNAKE_CASE = getattr(self.backend_tokenizer , snake_case__ , snake_case__ )
if tokenizer_component_instance:
SCREAMING_SNAKE_CASE = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
if "sep" in state:
SCREAMING_SNAKE_CASE = tuple(state['sep'] )
if "cls" in state:
SCREAMING_SNAKE_CASE = tuple(state['cls'] )
SCREAMING_SNAKE_CASE = False
if state.get('add_prefix_space' , snake_case__ ) != add_prefix_space:
SCREAMING_SNAKE_CASE = add_prefix_space
SCREAMING_SNAKE_CASE = True
if state.get('trim_offsets' , snake_case__ ) != trim_offsets:
SCREAMING_SNAKE_CASE = trim_offsets
SCREAMING_SNAKE_CASE = True
if changes_to_apply:
SCREAMING_SNAKE_CASE = getattr(snake_case__ , state.pop('type' ) )
SCREAMING_SNAKE_CASE = component_class(**snake_case__ )
setattr(self.backend_tokenizer , snake_case__ , snake_case__ )
@property
# Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
if self._mask_token is None:
if self.verbose:
logger.error('Using mask_token, but it is not set yet.' )
return None
return str(self._mask_token )
@mask_token.setter
def UpperCamelCase ( self : List[Any] , snake_case__ : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else value
SCREAMING_SNAKE_CASE = value
def UpperCamelCase ( self : Dict , *snake_case__ : Optional[Any] , **snake_case__ : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = kwargs.get('is_split_into_words' , snake_case__ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
'to use it with pretokenized inputs.' )
return super()._batch_encode_plus(*snake_case__ , **snake_case__ )
def UpperCamelCase ( self : List[str] , *snake_case__ : List[Any] , **snake_case__ : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = kwargs.get('is_split_into_words' , snake_case__ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
'to use it with pretokenized inputs.' )
return super()._encode_plus(*snake_case__ , **snake_case__ )
def UpperCamelCase ( self : Union[str, Any] , snake_case__ : str , snake_case__ : Optional[str] = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self._tokenizer.model.save(snake_case__ , name=snake_case__ )
return tuple(snake_case__ )
def UpperCamelCase ( self : List[str] , snake_case__ : int , snake_case__ : Tuple=None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def UpperCamelCase ( self : Optional[int] , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = [self.sep_token_id]
SCREAMING_SNAKE_CASE = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def UpperCamelCase ( self : Optional[Any] , snake_case__ : Union[Dict[str, EncodedInput], BatchEncoding] , snake_case__ : Optional[int] = None , snake_case__ : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , snake_case__ : Optional[int] = None , snake_case__ : Optional[bool] = None , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = super()._pad(
encoded_inputs=snake_case__ , max_length=snake_case__ , padding_strategy=snake_case__ , pad_to_multiple_of=snake_case__ , return_attention_mask=snake_case__ , )
# Load from model defaults
if return_attention_mask is None:
SCREAMING_SNAKE_CASE = 'attention_mask' in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
SCREAMING_SNAKE_CASE = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` needs to have the same length as other (sequential) inputs.
SCREAMING_SNAKE_CASE = len(encoded_inputs['global_attention_mask'] ) != len(snake_case__ )
if needs_to_be_padded:
SCREAMING_SNAKE_CASE = len(snake_case__ ) - len(encoded_inputs['global_attention_mask'] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
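                # e.g. [0, 1, 0] padded by 2 on the right -> [0, 1, 0, -1, -1]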
SCREAMING_SNAKE_CASE = (
encoded_inputs['global_attention_mask'] + [-1] * difference
)
elif self.padding_side == "left":
SCREAMING_SNAKE_CASE = [-1] * difference + encoded_inputs[
'global_attention_mask'
]
else:
raise ValueError('Invalid padding strategy:' + str(self.padding_side ) )
return encoded_inputs
| 673 | 1 |
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class UpperCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
__UpperCamelCase =1
@register_to_config
def __init__( self : Union[str, Any] , snake_case__ : Any=2_0_0_0 , snake_case__ : Optional[int]=0.1 , snake_case__ : List[Any]=2_0 , snake_case__ : Dict=1E-3 ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
def UpperCamelCase ( self : Optional[int] , snake_case__ : str , snake_case__ : Union[str, torch.device] = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = torch.linspace(1 , self.config.sampling_eps , snake_case__ , device=snake_case__ )
def UpperCamelCase ( self : str , snake_case__ : str , snake_case__ : int , snake_case__ : int , snake_case__ : List[Any]=None ):
"""simple docstring"""
if self.timesteps is None:
raise ValueError(
'`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler' )
# TODO(Patrick) better comments + non-PyTorch
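        # For the VP SDE, the perturbation kernel is
        #     x_t ~ N(exp(log_mean_coeff) * x_0, (1 - exp(2 * log_mean_coeff)) * I),
        # so `std` below is the marginal standard deviation at time t, and dividing
        # the model output by -std rescales a noise prediction into a score estimate.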
# postprocess model score
SCREAMING_SNAKE_CASE = (
-0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
)
SCREAMING_SNAKE_CASE = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff ) )
SCREAMING_SNAKE_CASE = std.flatten()
while len(std.shape ) < len(score.shape ):
SCREAMING_SNAKE_CASE = std.unsqueeze(-1 )
SCREAMING_SNAKE_CASE = -score / std
# compute
SCREAMING_SNAKE_CASE = -1.0 / len(self.timesteps )
SCREAMING_SNAKE_CASE = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
SCREAMING_SNAKE_CASE = beta_t.flatten()
while len(beta_t.shape ) < len(x.shape ):
SCREAMING_SNAKE_CASE = beta_t.unsqueeze(-1 )
SCREAMING_SNAKE_CASE = -0.5 * beta_t * x
SCREAMING_SNAKE_CASE = torch.sqrt(snake_case__ )
SCREAMING_SNAKE_CASE = drift - diffusion**2 * score
SCREAMING_SNAKE_CASE = x + drift * dt
# add noise
SCREAMING_SNAKE_CASE = randn_tensor(x.shape , layout=x.layout , generator=snake_case__ , device=x.device , dtype=x.dtype )
SCREAMING_SNAKE_CASE = x_mean + diffusion * math.sqrt(-dt ) * noise
return x, x_mean
def __len__( self : Any ):
"""simple docstring"""
return self.config.num_train_timesteps
| 673 |
#
# This is a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via NCCL and allocate GPU memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If you get a hang in `barrier` calls, you have some network issues; you may try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes with 4 GPUs per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10          # number of cores per task
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
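#
# On success each process prints "[hostname-local_rank] is OK" after the NCCL
# all_reduce and barrier below complete, and rank 0 additionally reports the
# torch/CUDA/NCCL versions.
#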
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def __lowerCAmelCase ( *_UpperCamelCase : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
with open(_UpperCamelCase , 'r' ) as fh:
fcntl.flock(_UpperCamelCase , fcntl.LOCK_EX )
try:
print(*_UpperCamelCase )
finally:
fcntl.flock(_UpperCamelCase , fcntl.LOCK_UN )
a_ : int = int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(local_rank)
a_ : str = torch.device("cuda", local_rank)
a_ : Optional[int] = socket.gethostname()
a_ : Union[str, Any] = F"""[{hostname}-{local_rank}]"""
try:
# test distributed
dist.init_process_group("nccl")
dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
dist.barrier()
# test cuda is available and can allocate memory
torch.cuda.is_available()
torch.ones(1).cuda(local_rank)
# global rank
a_ : Dict = dist.get_rank()
a_ : Any = dist.get_world_size()
printflock(F"""{gpu} is OK (global rank: {rank}/{world_size})""")
dist.barrier()
if rank == 0:
printflock(F"""pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}""")
except Exception:
printflock(F"""{gpu} is broken""")
raise
| 673 | 1 |
def __lowerCAmelCase ( _UpperCamelCase : int ) -> bool:
'''simple docstring'''
if num < 0:
return False
SCREAMING_SNAKE_CASE = num
SCREAMING_SNAKE_CASE = 0
while num > 0:
SCREAMING_SNAKE_CASE = rev_num * 10 + (num % 10)
num //= 10
return num_copy == rev_num
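# Illustrative behavior: 121 -> True (the digits read the same reversed),
# 123 -> False (reversal yields 321), and any negative input is rejected up front.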
if __name__ == "__main__":
import doctest
doctest.testmod()
| 673 |
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
a_ : Optional[Any] = logging.get_logger(__name__)
a_ : Optional[Any] = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
a_ : Any = {
"vocab_file": {
"allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json"
},
"merges_file": {
"allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt"
},
}
a_ : Union[str, Any] = {"allegro/herbert-base-cased": 514}
a_ : List[Any] = {}
class UpperCamelCase ( SCREAMING_SNAKE_CASE ):
__UpperCamelCase =VOCAB_FILES_NAMES
__UpperCamelCase =PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase =PRETRAINED_INIT_CONFIGURATION
__UpperCamelCase =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase =HerbertTokenizer
def __init__( self : Tuple , snake_case__ : Optional[Any]=None , snake_case__ : int=None , snake_case__ : Optional[int]=None , snake_case__ : str="<s>" , snake_case__ : Tuple="<unk>" , snake_case__ : List[str]="<pad>" , snake_case__ : Tuple="<mask>" , snake_case__ : Dict="</s>" , **snake_case__ : List[str] , ):
"""simple docstring"""
super().__init__(
snake_case__ , snake_case__ , tokenizer_file=snake_case__ , cls_token=snake_case__ , unk_token=snake_case__ , pad_token=snake_case__ , mask_token=snake_case__ , sep_token=snake_case__ , **snake_case__ , )
def UpperCamelCase ( self : Union[str, Any] , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = [self.cls_token_id]
SCREAMING_SNAKE_CASE = [self.sep_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
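    # Resulting layout: <s> tokens_a </s> for a single sequence and
    # <s> tokens_a </s> tokens_b </s> for a pair.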
def UpperCamelCase ( self : Optional[Any] , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None , snake_case__ : bool = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=snake_case__ , token_ids_a=snake_case__ , already_has_special_tokens=snake_case__ )
if token_ids_a is None:
return [1] + ([0] * len(snake_case__ )) + [1]
return [1] + ([0] * len(snake_case__ )) + [1] + ([0] * len(snake_case__ )) + [1]
def UpperCamelCase ( self : Optional[Any] , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = [self.sep_token_id]
SCREAMING_SNAKE_CASE = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCamelCase ( self : Tuple , snake_case__ : str , snake_case__ : Optional[str] = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self._tokenizer.model.save(snake_case__ , name=snake_case__ )
return tuple(snake_case__ )
| 673 | 1 |
import argparse
import importlib
from pathlib import Path
# Test all the extensions added in the setup
a_ : Dict = [
"kernels/rwkv/wkv_cuda.cu",
"kernels/rwkv/wkv_op.cpp",
"kernels/deformable_detr/ms_deform_attn.h",
"kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh",
"models/graphormer/algos_graphormer.pyx",
]
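# Each path above is checked relative to the `transformers` package root that is
# resolved below (either the installed module or the local build directory).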
def __lowerCAmelCase ( _UpperCamelCase : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
for file in FILES_TO_FIND:
if not (transformers_path / file).exists():
return False
return True
if __name__ == "__main__":
a_ : Any = argparse.ArgumentParser()
parser.add_argument("--check_lib", action="store_true", help="Whether to check the build or the actual package.")
a_ : Optional[int] = parser.parse_args()
if args.check_lib:
a_ : List[Any] = importlib.import_module("transformers")
a_ : Union[str, Any] = Path(transformers_module.__file__).parent
else:
a_ : str = Path.cwd() / "build/lib/transformers"
if not test_custom_files_are_present(transformers_path):
raise ValueError("The built release does not contain the custom files. Fix this before going further!")
| 673 |
def __lowerCAmelCase ( _UpperCamelCase : int ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE = abs(_UpperCamelCase )
SCREAMING_SNAKE_CASE = 0
while n > 0:
res += n % 10
n //= 10
return res
def __lowerCAmelCase ( _UpperCamelCase : int ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE = abs(_UpperCamelCase )
return n if n < 10 else n % 10 + sum_of_digits(n // 10 )
def __lowerCAmelCase ( _UpperCamelCase : int ) -> int:
'''simple docstring'''
return sum(int(_UpperCamelCase ) for c in str(abs(_UpperCamelCase ) ) )
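# Worked example (intended behavior): each of sum_of_digits, sum_of_digits_recursion
# and sum_of_digits_compact maps 12345 -> 1 + 2 + 3 + 4 + 5 = 15.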
def __lowerCAmelCase ( ) -> None:
'''simple docstring'''
from collections.abc import Callable
from timeit import timeit
def benchmark_a_function(_UpperCamelCase : Callable , _UpperCamelCase : int ) -> None:
SCREAMING_SNAKE_CASE = f"""{func.__name__}({value})"""
SCREAMING_SNAKE_CASE = timeit(f"""__main__.{call}""" , setup='import __main__' )
print(f"""{call:56} = {func(_UpperCamelCase )} -- {timing:.4f} seconds""" )
for value in (26_21_44, 11_25_89_99_06_84_26_24, 1_26_76_50_60_02_28_22_94_01_49_67_03_20_53_76):
for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
benchmark_a_function(_UpperCamelCase , _UpperCamelCase )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 673 | 1 |
import flax.linen as nn
import jax
import jax.numpy as jnp
class UpperCamelCase ( nn.Module ):
__UpperCamelCase =42
__UpperCamelCase =jnp.floataa
def UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self : List[Any] , snake_case__ : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = hidden_states.shape
SCREAMING_SNAKE_CASE = jax.image.resize(
snake_case__ , shape=(batch, height * 2, width * 2, channels) , method='nearest' , )
SCREAMING_SNAKE_CASE = self.conv(snake_case__ )
return hidden_states
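# Shape sketch for the upsampling block above (assumed NHWC layout, the flax.linen default):
# (batch, h, w, c) -> resize -> (batch, 2h, 2w, c) -> conv -> (batch, 2h, 2w, out_channels)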
class UpperCamelCase ( nn.Module ):
__UpperCamelCase =42
__UpperCamelCase =jnp.floataa
def UpperCamelCase ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self : str , snake_case__ : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.conv(snake_case__ )
return hidden_states
class UpperCamelCase ( nn.Module ):
__UpperCamelCase =42
__UpperCamelCase =None
__UpperCamelCase =0.0
__UpperCamelCase =None
__UpperCamelCase =jnp.floataa
def UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.in_channels if self.out_channels is None else self.out_channels
SCREAMING_SNAKE_CASE = nn.GroupNorm(num_groups=3_2 , epsilon=1E-5 )
SCREAMING_SNAKE_CASE = nn.Conv(
snake_case__ , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
SCREAMING_SNAKE_CASE = nn.Dense(snake_case__ , dtype=self.dtype )
SCREAMING_SNAKE_CASE = nn.GroupNorm(num_groups=3_2 , epsilon=1E-5 )
SCREAMING_SNAKE_CASE = nn.Dropout(self.dropout_prob )
SCREAMING_SNAKE_CASE = nn.Conv(
snake_case__ , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
SCREAMING_SNAKE_CASE = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut
SCREAMING_SNAKE_CASE = None
if use_nin_shortcut:
SCREAMING_SNAKE_CASE = nn.Conv(
snake_case__ , kernel_size=(1, 1) , strides=(1, 1) , padding='VALID' , dtype=self.dtype , )
def __call__( self : List[Any] , snake_case__ : Optional[Any] , snake_case__ : Union[str, Any] , snake_case__ : List[Any]=True ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = hidden_states
SCREAMING_SNAKE_CASE = self.norma(snake_case__ )
SCREAMING_SNAKE_CASE = nn.swish(snake_case__ )
SCREAMING_SNAKE_CASE = self.conva(snake_case__ )
SCREAMING_SNAKE_CASE = self.time_emb_proj(nn.swish(snake_case__ ) )
SCREAMING_SNAKE_CASE = jnp.expand_dims(jnp.expand_dims(snake_case__ , 1 ) , 1 )
SCREAMING_SNAKE_CASE = hidden_states + temb
SCREAMING_SNAKE_CASE = self.norma(snake_case__ )
SCREAMING_SNAKE_CASE = nn.swish(snake_case__ )
SCREAMING_SNAKE_CASE = self.dropout(snake_case__ , snake_case__ )
SCREAMING_SNAKE_CASE = self.conva(snake_case__ )
if self.conv_shortcut is not None:
SCREAMING_SNAKE_CASE = self.conv_shortcut(snake_case__ )
return hidden_states + residual
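# In the residual block above, the time embedding is projected to
# (batch, out_channels) and broadcast over the spatial dimensions as
# (batch, 1, 1, out_channels) via the two expand_dims calls before being
# added to the feature map.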
| 673 |
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
"pipelines_utils",
"0.22.0",
"Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.",
standard_warn=False,
stacklevel=3,
)
| 673 | 1 |
import socket
def __lowerCAmelCase ( ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE = socket.socket(socket.AF_INET , socket.SOCK_STREAM )
SCREAMING_SNAKE_CASE = socket.gethostname()
SCREAMING_SNAKE_CASE = 1_23_12
sock.connect((host, port) )
sock.send(b'Hello server!' )
with open('Received_file' , 'wb' ) as out_file:
print('File opened' )
print('Receiving data...' )
while True:
SCREAMING_SNAKE_CASE = sock.recv(10_24 )
if not data:
break
out_file.write(_UpperCamelCase )
print('Successfully received the file' )
sock.close()
print('Connection closed' )
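# A matching sender is assumed to listen on port 12312; a minimal counterpart
# (not part of the original script) would bind a socket, accept one connection,
# read the "Hello server!" greeting, and stream the file back with sendall(...)
# in 1024-byte chunks until EOF.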
if __name__ == "__main__":
main()
| 673 |
import gc
import unittest
import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
ClapTextConfig,
ClapTextModelWithProjection,
RobertaTokenizer,
SpeechTaHifiGan,
SpeechTaHifiGanConfig,
)
from diffusers import (
AudioLDMPipeline,
AutoencoderKL,
DDIMScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class UpperCamelCase ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
__UpperCamelCase =AudioLDMPipeline
__UpperCamelCase =TEXT_TO_AUDIO_PARAMS
__UpperCamelCase =TEXT_TO_AUDIO_BATCH_PARAMS
__UpperCamelCase =frozenset(
[
"num_inference_steps",
"num_waveforms_per_prompt",
"generator",
"latents",
"output_type",
"return_dict",
"callback",
"callback_steps",
] )
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=(3_2, 6_4) , class_embed_type='simple_projection' , projection_class_embeddings_input_dim=3_2 , class_embeddings_concat=snake_case__ , )
SCREAMING_SNAKE_CASE = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=snake_case__ , set_alpha_to_one=snake_case__ , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=1 , out_channels=1 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = ClapTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , projection_dim=3_2 , )
SCREAMING_SNAKE_CASE = ClapTextModelWithProjection(snake_case__ )
SCREAMING_SNAKE_CASE = RobertaTokenizer.from_pretrained('hf-internal-testing/tiny-random-roberta' , model_max_length=7_7 )
SCREAMING_SNAKE_CASE = SpeechTaHifiGanConfig(
model_in_dim=8 , sampling_rate=1_6_0_0_0 , upsample_initial_channel=1_6 , upsample_rates=[2, 2] , upsample_kernel_sizes=[4, 4] , resblock_kernel_sizes=[3, 7] , resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]] , normalize_before=snake_case__ , )
SCREAMING_SNAKE_CASE = SpeechTaHifiGan(snake_case__ )
SCREAMING_SNAKE_CASE = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'vocoder': vocoder,
}
return components
def UpperCamelCase ( self : Optional[int] , snake_case__ : int , snake_case__ : int=0 ):
"""simple docstring"""
if str(snake_case__ ).startswith('mps' ):
SCREAMING_SNAKE_CASE = torch.manual_seed(snake_case__ )
else:
SCREAMING_SNAKE_CASE = torch.Generator(device=snake_case__ ).manual_seed(snake_case__ )
SCREAMING_SNAKE_CASE = {
'prompt': 'A hammer hitting a wooden surface',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
}
return inputs
def UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = 'cpu' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE = self.get_dummy_components()
SCREAMING_SNAKE_CASE = AudioLDMPipeline(**snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
audioldm_pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = self.get_dummy_inputs(snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe(**snake_case__ )
SCREAMING_SNAKE_CASE = output.audios[0]
assert audio.ndim == 1
assert len(snake_case__ ) == 2_5_6
SCREAMING_SNAKE_CASE = audio[:1_0]
SCREAMING_SNAKE_CASE = np.array(
[-0.0_050, 0.0_050, -0.0_060, 0.0_033, -0.0_026, 0.0_033, -0.0_027, 0.0_033, -0.0_028, 0.0_033] )
assert np.abs(audio_slice - expected_slice ).max() < 1E-2
def UpperCamelCase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.get_dummy_components()
SCREAMING_SNAKE_CASE = AudioLDMPipeline(**snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
audioldm_pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = self.get_dummy_inputs(snake_case__ )
SCREAMING_SNAKE_CASE = 3 * [inputs['prompt']]
# forward
SCREAMING_SNAKE_CASE = audioldm_pipe(**snake_case__ )
SCREAMING_SNAKE_CASE = output.audios[0]
SCREAMING_SNAKE_CASE = self.get_dummy_inputs(snake_case__ )
SCREAMING_SNAKE_CASE = 3 * [inputs.pop('prompt' )]
SCREAMING_SNAKE_CASE = audioldm_pipe.tokenizer(
snake_case__ , padding='max_length' , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=snake_case__ , return_tensors='pt' , )
SCREAMING_SNAKE_CASE = text_inputs['input_ids'].to(snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.text_encoder(
snake_case__ , )
SCREAMING_SNAKE_CASE = prompt_embeds.text_embeds
# additional L_2 normalization over each hidden-state
SCREAMING_SNAKE_CASE = F.normalize(snake_case__ , dim=-1 )
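        # (This mirrors the normalization the pipeline itself applies when encoding
        # the prompt -- an assumption based on how this equivalence test is set up --
        # so the precomputed embeddings can reproduce the in-pipeline result below.)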
SCREAMING_SNAKE_CASE = prompt_embeds
# forward
SCREAMING_SNAKE_CASE = audioldm_pipe(**snake_case__ )
SCREAMING_SNAKE_CASE = output.audios[0]
assert np.abs(audio_a - audio_a ).max() < 1E-2
def UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.get_dummy_components()
SCREAMING_SNAKE_CASE = AudioLDMPipeline(**snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
audioldm_pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = self.get_dummy_inputs(snake_case__ )
SCREAMING_SNAKE_CASE = 3 * ['this is a negative prompt']
SCREAMING_SNAKE_CASE = negative_prompt
SCREAMING_SNAKE_CASE = 3 * [inputs['prompt']]
# forward
SCREAMING_SNAKE_CASE = audioldm_pipe(**snake_case__ )
SCREAMING_SNAKE_CASE = output.audios[0]
SCREAMING_SNAKE_CASE = self.get_dummy_inputs(snake_case__ )
SCREAMING_SNAKE_CASE = 3 * [inputs.pop('prompt' )]
SCREAMING_SNAKE_CASE = []
for p in [prompt, negative_prompt]:
SCREAMING_SNAKE_CASE = audioldm_pipe.tokenizer(
snake_case__ , padding='max_length' , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=snake_case__ , return_tensors='pt' , )
SCREAMING_SNAKE_CASE = text_inputs['input_ids'].to(snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.text_encoder(
snake_case__ , )
SCREAMING_SNAKE_CASE = text_embeds.text_embeds
# additional L_2 normalization over each hidden-state
SCREAMING_SNAKE_CASE = F.normalize(snake_case__ , dim=-1 )
embeds.append(snake_case__ )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = embeds
# forward
SCREAMING_SNAKE_CASE = audioldm_pipe(**snake_case__ )
SCREAMING_SNAKE_CASE = output.audios[0]
assert np.abs(audio_a - audio_a ).max() < 1E-2
def UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = 'cpu' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE = self.get_dummy_components()
SCREAMING_SNAKE_CASE = PNDMScheduler(skip_prk_steps=snake_case__ )
SCREAMING_SNAKE_CASE = AudioLDMPipeline(**snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
audioldm_pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = self.get_dummy_inputs(snake_case__ )
SCREAMING_SNAKE_CASE = 'egg cracking'
SCREAMING_SNAKE_CASE = audioldm_pipe(**snake_case__ , negative_prompt=snake_case__ )
SCREAMING_SNAKE_CASE = output.audios[0]
assert audio.ndim == 1
assert len(snake_case__ ) == 2_5_6
SCREAMING_SNAKE_CASE = audio[:1_0]
SCREAMING_SNAKE_CASE = np.array(
[-0.0_051, 0.0_050, -0.0_060, 0.0_034, -0.0_026, 0.0_033, -0.0_027, 0.0_033, -0.0_028, 0.0_032] )
assert np.abs(audio_slice - expected_slice ).max() < 1E-2
def UpperCamelCase ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = 'cpu' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE = self.get_dummy_components()
SCREAMING_SNAKE_CASE = PNDMScheduler(skip_prk_steps=snake_case__ )
SCREAMING_SNAKE_CASE = AudioLDMPipeline(**snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
audioldm_pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = 'A hammer hitting a wooden surface'
# test num_waveforms_per_prompt=1 (default)
SCREAMING_SNAKE_CASE = audioldm_pipe(snake_case__ , num_inference_steps=2 ).audios
assert audios.shape == (1, 2_5_6)
# test num_waveforms_per_prompt=1 (default) for batch of prompts
SCREAMING_SNAKE_CASE = 2
SCREAMING_SNAKE_CASE = audioldm_pipe([prompt] * batch_size , num_inference_steps=2 ).audios
assert audios.shape == (batch_size, 2_5_6)
# test num_waveforms_per_prompt for single prompt
SCREAMING_SNAKE_CASE = 2
SCREAMING_SNAKE_CASE = audioldm_pipe(snake_case__ , num_inference_steps=2 , num_waveforms_per_prompt=snake_case__ ).audios
assert audios.shape == (num_waveforms_per_prompt, 2_5_6)
# test num_waveforms_per_prompt for batch of prompts
SCREAMING_SNAKE_CASE = 2
SCREAMING_SNAKE_CASE = audioldm_pipe(
[prompt] * batch_size , num_inference_steps=2 , num_waveforms_per_prompt=snake_case__ ).audios
assert audios.shape == (batch_size * num_waveforms_per_prompt, 2_5_6)
def UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = 'cpu' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE = self.get_dummy_components()
SCREAMING_SNAKE_CASE = AudioLDMPipeline(**snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
audioldm_pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.vocoder.config.sampling_rate
SCREAMING_SNAKE_CASE = self.get_dummy_inputs(snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe(audio_length_in_s=0.016 , **snake_case__ )
SCREAMING_SNAKE_CASE = output.audios[0]
assert audio.ndim == 1
assert len(snake_case__ ) / vocoder_sampling_rate == 0.016
SCREAMING_SNAKE_CASE = audioldm_pipe(audio_length_in_s=0.032 , **snake_case__ )
SCREAMING_SNAKE_CASE = output.audios[0]
assert audio.ndim == 1
assert len(snake_case__ ) / vocoder_sampling_rate == 0.032
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.get_dummy_components()
SCREAMING_SNAKE_CASE = AudioLDMPipeline(**snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
audioldm_pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = ['hey']
SCREAMING_SNAKE_CASE = audioldm_pipe(snake_case__ , num_inference_steps=1 )
SCREAMING_SNAKE_CASE = output.audios.shape
assert audio_shape == (1, 2_5_6)
SCREAMING_SNAKE_CASE = audioldm_pipe.vocoder.config
config.model_in_dim *= 2
SCREAMING_SNAKE_CASE = SpeechTaHifiGan(snake_case__ ).to(snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe(snake_case__ , num_inference_steps=1 )
SCREAMING_SNAKE_CASE = output.audios.shape
# waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
assert audio_shape == (1, 2_5_6)
def UpperCamelCase ( self : Tuple ):
"""simple docstring"""
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=snake_case__ )
def UpperCamelCase ( self : int ):
"""simple docstring"""
self._test_inference_batch_single_identical(test_mean_pixel_difference=snake_case__ )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def UpperCamelCase ( self : Dict ):
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=snake_case__ )
@slow
class UpperCamelCase ( unittest.TestCase ):
def UpperCamelCase ( self : Any ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase ( self : int , snake_case__ : int , snake_case__ : Tuple="cpu" , snake_case__ : List[str]=torch.floataa , snake_case__ : Optional[Any]=0 ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = torch.Generator(device=snake_case__ ).manual_seed(snake_case__ )
SCREAMING_SNAKE_CASE = np.random.RandomState(snake_case__ ).standard_normal((1, 8, 1_2_8, 1_6) )
SCREAMING_SNAKE_CASE = torch.from_numpy(snake_case__ ).to(device=snake_case__ , dtype=snake_case__ )
SCREAMING_SNAKE_CASE = {
'prompt': 'A hammer hitting a wooden surface',
'latents': latents,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 2.5,
}
return inputs
def UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = AudioLDMPipeline.from_pretrained('cvssp/audioldm' )
SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
audioldm_pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = self.get_inputs(snake_case__ )
SCREAMING_SNAKE_CASE = 2_5
SCREAMING_SNAKE_CASE = audioldm_pipe(**snake_case__ ).audios[0]
assert audio.ndim == 1
assert len(snake_case__ ) == 8_1_9_2_0
SCREAMING_SNAKE_CASE = audio[7_7_2_3_0:7_7_2_4_0]
SCREAMING_SNAKE_CASE = np.array(
[-0.4_884, -0.4_607, 0.0_023, 0.5_007, 0.5_896, 0.5_151, 0.3_813, -0.0_208, -0.3_687, -0.4_315] )
SCREAMING_SNAKE_CASE = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 1E-2
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = AudioLDMPipeline.from_pretrained('cvssp/audioldm' )
SCREAMING_SNAKE_CASE = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config )
SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
audioldm_pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = self.get_inputs(snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe(**snake_case__ ).audios[0]
assert audio.ndim == 1
assert len(snake_case__ ) == 8_1_9_2_0
SCREAMING_SNAKE_CASE = audio[2_7_7_8_0:2_7_7_9_0]
SCREAMING_SNAKE_CASE = np.array([-0.2_131, -0.0_873, -0.0_124, -0.0_189, 0.0_569, 0.1_373, 0.1_883, 0.2_886, 0.3_297, 0.2_212] )
SCREAMING_SNAKE_CASE = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 3E-2
| 673 | 1 |
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def __lowerCAmelCase ( _UpperCamelCase : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
for param in module.parameters():
SCREAMING_SNAKE_CASE = False
def __lowerCAmelCase ( ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = 'cuda' if torch.cuda.is_available() else 'cpu'
if torch.backends.mps.is_available() and torch.backends.mps.is_built():
SCREAMING_SNAKE_CASE = 'mps'
if device == "mps":
print(
'WARNING: MPS currently doesn\'t seem to work, and messes up backpropagation without any visible torch'
' errors. I recommend using CUDA on a colab notebook or CPU instead if you\'re facing inexplicable issues'
' with generations.' )
return device
def __lowerCAmelCase ( _UpperCamelCase : List[str] ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = plt.imshow(_UpperCamelCase )
fig.axes.get_xaxis().set_visible(_UpperCamelCase )
fig.axes.get_yaxis().set_visible(_UpperCamelCase )
plt.show()
def __lowerCAmelCase ( ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = datetime.now()
SCREAMING_SNAKE_CASE = current_time.strftime('%H:%M:%S' )
return timestamp
| 673 |
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class UpperCamelCase :
def __init__( self : Dict , snake_case__ : str , snake_case__ : str=1_3 , snake_case__ : Tuple=7 , snake_case__ : Tuple=True , snake_case__ : Tuple=True , snake_case__ : List[str]=False , snake_case__ : Any=True , snake_case__ : Union[str, Any]=9_9 , snake_case__ : Dict=3_2 , snake_case__ : Optional[Any]=5 , snake_case__ : Optional[Any]=4 , snake_case__ : Union[str, Any]=3_7 , snake_case__ : Tuple="gelu" , snake_case__ : Dict=0.1 , snake_case__ : Any=0.1 , snake_case__ : int=5_1_2 , snake_case__ : Dict=1_6 , snake_case__ : str=2 , snake_case__ : Any=0.02 , snake_case__ : List[str]=3 , snake_case__ : int=4 , snake_case__ : List[str]=None , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = parent
SCREAMING_SNAKE_CASE = batch_size
SCREAMING_SNAKE_CASE = seq_length
SCREAMING_SNAKE_CASE = is_training
SCREAMING_SNAKE_CASE = use_input_mask
SCREAMING_SNAKE_CASE = use_token_type_ids
SCREAMING_SNAKE_CASE = use_labels
SCREAMING_SNAKE_CASE = vocab_size
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = intermediate_size
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = hidden_dropout_prob
SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE = max_position_embeddings
SCREAMING_SNAKE_CASE = type_vocab_size
SCREAMING_SNAKE_CASE = type_sequence_label_size
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = num_labels
SCREAMING_SNAKE_CASE = num_choices
SCREAMING_SNAKE_CASE = scope
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase ( self : Dict ):
"""simple docstring"""
return OpenLlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=snake_case__ , initializer_range=self.initializer_range , use_stable_embedding=snake_case__ , )
def UpperCamelCase ( self : int , snake_case__ : Tuple , snake_case__ : str , snake_case__ : Optional[Any] , snake_case__ : Union[str, Any] , snake_case__ : List[str] , snake_case__ : Any , snake_case__ : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = OpenLlamaModel(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ )
SCREAMING_SNAKE_CASE = model(snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase ( self : Optional[int] , snake_case__ : Dict , snake_case__ : List[Any] , snake_case__ : str , snake_case__ : str , snake_case__ : int , snake_case__ : Union[str, Any] , snake_case__ : Optional[int] , snake_case__ : List[Any] , snake_case__ : str , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = OpenLlamaModel(snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(
snake_case__ , attention_mask=snake_case__ , encoder_hidden_states=snake_case__ , encoder_attention_mask=snake_case__ , )
SCREAMING_SNAKE_CASE = model(
snake_case__ , attention_mask=snake_case__ , encoder_hidden_states=snake_case__ , )
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase ( self : Dict , snake_case__ : Union[str, Any] , snake_case__ : int , snake_case__ : int , snake_case__ : List[str] , snake_case__ : Optional[Any] , snake_case__ : Optional[Any] , snake_case__ : Optional[int] , snake_case__ : Optional[int] , snake_case__ : Tuple , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = OpenLlamaForCausalLM(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase ( self : Optional[int] , snake_case__ : Union[str, Any] , snake_case__ : List[Any] , snake_case__ : Any , snake_case__ : int , snake_case__ : Any , snake_case__ : Union[str, Any] , snake_case__ : int , snake_case__ : str , snake_case__ : Union[str, Any] , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = OpenLlamaForCausalLM(config=snake_case__ )
model.to(snake_case__ )
model.eval()
# first forward pass
SCREAMING_SNAKE_CASE = model(
snake_case__ , attention_mask=snake_case__ , encoder_hidden_states=snake_case__ , encoder_attention_mask=snake_case__ , use_cache=snake_case__ , )
SCREAMING_SNAKE_CASE = outputs.past_key_values
        # create hypothetical multiple next tokens and extend to next_input_ids
SCREAMING_SNAKE_CASE = ids_tensor((self.batch_size, 3) , config.vocab_size )
SCREAMING_SNAKE_CASE = ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append to next input_ids and attention mask
SCREAMING_SNAKE_CASE = torch.cat([input_ids, next_tokens] , dim=-1 )
SCREAMING_SNAKE_CASE = torch.cat([input_mask, next_mask] , dim=-1 )
SCREAMING_SNAKE_CASE = model(
snake_case__ , attention_mask=snake_case__ , encoder_hidden_states=snake_case__ , encoder_attention_mask=snake_case__ , output_hidden_states=snake_case__ , )['hidden_states'][0]
SCREAMING_SNAKE_CASE = model(
snake_case__ , attention_mask=snake_case__ , encoder_hidden_states=snake_case__ , encoder_attention_mask=snake_case__ , past_key_values=snake_case__ , output_hidden_states=snake_case__ , )['hidden_states'][0]
# select random slice
SCREAMING_SNAKE_CASE = ids_tensor((1,) , output_from_past.shape[-1] ).item()
SCREAMING_SNAKE_CASE = output_from_no_past[:, -3:, random_slice_idx].detach()
SCREAMING_SNAKE_CASE = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(snake_case__ , snake_case__ , atol=1E-3 ) )
def UpperCamelCase ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class OpenLlamaModelTest ( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
__UpperCamelCase =(
(OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
)
__UpperCamelCase =(OpenLlamaForCausalLM,) if is_torch_available() else ()
__UpperCamelCase =(
{
"feature-extraction": OpenLlamaModel,
"text-classification": OpenLlamaForSequenceClassification,
"text-generation": OpenLlamaForCausalLM,
"zero-shot": OpenLlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
__UpperCamelCase =False
__UpperCamelCase =False
def UpperCamelCase ( self : int ):
"""simple docstring"""
        self.model_tester = OpenLlamaModelTester(self )
        self.config_tester = ConfigTester(self , config_class=OpenLlamaConfig , hidden_size=3_7 )
def UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCamelCase ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case__ )
def UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
SCREAMING_SNAKE_CASE = type
self.model_tester.create_and_check_model(*snake_case__ )
def UpperCamelCase ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE = 3
SCREAMING_SNAKE_CASE = input_dict['input_ids']
SCREAMING_SNAKE_CASE = input_ids.ne(1 ).to(snake_case__ )
SCREAMING_SNAKE_CASE = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
SCREAMING_SNAKE_CASE = OpenLlamaForSequenceClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ , labels=snake_case__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE = 3
SCREAMING_SNAKE_CASE = 'single_label_classification'
SCREAMING_SNAKE_CASE = input_dict['input_ids']
SCREAMING_SNAKE_CASE = input_ids.ne(1 ).to(snake_case__ )
SCREAMING_SNAKE_CASE = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
SCREAMING_SNAKE_CASE = OpenLlamaForSequenceClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ , labels=snake_case__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE = 3
SCREAMING_SNAKE_CASE = 'multi_label_classification'
SCREAMING_SNAKE_CASE = input_dict['input_ids']
SCREAMING_SNAKE_CASE = input_ids.ne(1 ).to(snake_case__ )
SCREAMING_SNAKE_CASE = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
SCREAMING_SNAKE_CASE = OpenLlamaForSequenceClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ , labels=snake_case__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip('Open-Llama buffers include complex numbers, which breaks this test' )
def UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
pass
@parameterized.expand([('linear',), ('dynamic',)] )
def UpperCamelCase ( self : str , snake_case__ : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE = ids_tensor([1, 1_0] , config.vocab_size )
SCREAMING_SNAKE_CASE = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights
SCREAMING_SNAKE_CASE = OpenLlamaModel(snake_case__ )
original_model.to(snake_case__ )
original_model.eval()
SCREAMING_SNAKE_CASE = original_model(snake_case__ ).last_hidden_state
SCREAMING_SNAKE_CASE = original_model(snake_case__ ).last_hidden_state
set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights
SCREAMING_SNAKE_CASE = {'type': scaling_type, 'factor': 10.0}
SCREAMING_SNAKE_CASE = OpenLlamaModel(snake_case__ )
scaled_model.to(snake_case__ )
scaled_model.eval()
SCREAMING_SNAKE_CASE = scaled_model(snake_case__ ).last_hidden_state
SCREAMING_SNAKE_CASE = scaled_model(snake_case__ ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(snake_case__ , snake_case__ , atol=1E-5 ) )
else:
self.assertFalse(torch.allclose(snake_case__ , snake_case__ , atol=1E-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(snake_case__ , snake_case__ , atol=1E-5 ) )
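    # Hedged usage sketch (not executed here): only the `rope_scaling` dict format is
    # taken from the parameterized test above, the rest is illustrative.
    #
    #   config.rope_scaling = {"type": "dynamic", "factor": 10.0}
    #   model = OpenLlamaModel(config)  # short inputs then match the unscaled model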
| 673 | 1 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
ImageTextPipelineOutput,
UniDiffuserPipeline,
)
else:
from .modeling_text_decoder import UniDiffuserTextDecoder
    from .modeling_uvit import UniDiffuserModel, UTransformer2DModel
from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
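# Hedged usage sketch (the checkpoint id is an assumption, not pinned by this module):
#
#   from diffusers import UniDiffuserPipeline
#   pipe = UniDiffuserPipeline.from_pretrained("thu-ml/unidiffuser-v1")
#   sample = pipe(prompt="an astronaut riding a horse")  # joint image-text generation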
| 673 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class SpeechToTextTool ( PipelineTool ):
__UpperCamelCase ="openai/whisper-base"
__UpperCamelCase =(
"This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
"transcribed text."
)
__UpperCamelCase ="transcriber"
__UpperCamelCase =WhisperProcessor
__UpperCamelCase =WhisperForConditionalGeneration
__UpperCamelCase =["audio"]
__UpperCamelCase =["text"]
    def encode ( self : Dict , audio : Tuple ):
        """simple docstring"""
        return self.pre_processor(audio , return_tensors='pt' ).input_features
    def forward ( self : Optional[int] , inputs : Tuple ):
        """simple docstring"""
        return self.model.generate(inputs=inputs )
    def decode ( self : str , outputs : Union[str, Any] ):
        """simple docstring"""
        return self.pre_processor.batch_decode(outputs , skip_special_tokens=True )[0]
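# Hedged usage sketch (the audio path is illustrative; `PipelineTool.__call__`
# chains the encode/forward/decode steps defined above):
#
#   tool = SpeechToTextTool()
#   transcript = tool("path/to/audio.wav")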
| 673 | 1 |
def solution ( ) -> int:
    '''simple docstring'''
    return [
        a * b * (10_00 - a - b)
        for a in range(1 , 9_99 )
        for b in range(a , 9_99 )
        if (a * a + b * b == (10_00 - a - b) ** 2)
    ][0]
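# The only qualifying triplet is (200, 375, 425), so solution() returns 31875000.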
if __name__ == "__main__":
print(F"""{solution() = }""")
| 673 |
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
NLTK_VERSION = version.parse(importlib_metadata.version("nltk"))
if NLTK_VERSION >= version.Version("3.6.4"):
from nltk import word_tokenize
a_ : Dict = "\\n@inproceedings{banarjee2005,\n title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n author = {Banerjee, Satanjeev and Lavie, Alon},\n booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n month = jun,\n year = {2005},\n address = {Ann Arbor, Michigan},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W05-0909},\n pages = {65--72},\n}\n"
a_ : str = "\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n"
a_ : int = "\nComputes METEOR score of translated segments against one or more references.\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n gamma: Relative weight assigned to fragmentation penalty. default: 0.5\nReturns:\n 'meteor': meteor score.\nExamples:\n\n >>> meteor = datasets.load_metric('meteor')\n >>> predictions = [\"It is a guide to action which ensures that the military always obeys the commands of the party\"]\n >>> references = [\"It is a guide to action that ensures that the military will forever heed Party commands\"]\n >>> results = meteor.compute(predictions=predictions, references=references)\n >>> print(round(results[\"meteor\"], 4))\n 0.6944\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCamelCase ( datasets.Metric ):
    def _info ( self : str ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Value('string' , id='sequence' ),
} ) , codebase_urls=['https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py'] , reference_urls=[
'https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score',
'https://en.wikipedia.org/wiki/METEOR',
] , )
    def _download_and_prepare ( self : Dict , dl_manager : int ):
        """simple docstring"""
        import nltk
        nltk.download('wordnet' )
        if NLTK_VERSION >= version.Version('3.6.5' ):
            nltk.download('punkt' )
        if NLTK_VERSION >= version.Version('3.6.6' ):
            nltk.download('omw-1.4' )
    def _compute ( self : Union[str, Any] , predictions : str , references : List[Any] , alpha : List[Any]=0.9 , beta : Optional[Any]=3 , gamma : Any=0.5 ):
        """simple docstring"""
        if NLTK_VERSION >= version.Version('3.6.5' ):
            scores = [
                meteor_score.single_meteor_score(
                    word_tokenize(ref ) , word_tokenize(pred ) , alpha=alpha , beta=beta , gamma=gamma )
                for ref, pred in zip(references , predictions )
            ]
        else:
            scores = [
                meteor_score.single_meteor_score(ref , pred , alpha=alpha , beta=beta , gamma=gamma )
                for ref, pred in zip(references , predictions )
            ]
        return {"meteor": np.mean(scores )}
| 673 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a_ : Optional[Any] = logging.get_logger(__name__)
a_ : int = {
"andreasmadsen/efficient_mlm_m0.40": (
"https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json"
),
}
class RobertaPreLayerNormConfig ( PretrainedConfig ):
__UpperCamelCase ="roberta-prelayernorm"
def __init__( self : List[str] , snake_case__ : str=5_0_2_6_5 , snake_case__ : str=7_6_8 , snake_case__ : str=1_2 , snake_case__ : Any=1_2 , snake_case__ : Any=3_0_7_2 , snake_case__ : int="gelu" , snake_case__ : Union[str, Any]=0.1 , snake_case__ : Any=0.1 , snake_case__ : str=5_1_2 , snake_case__ : Optional[int]=2 , snake_case__ : int=0.02 , snake_case__ : str=1E-12 , snake_case__ : Any=1 , snake_case__ : Tuple=0 , snake_case__ : List[str]=2 , snake_case__ : Union[str, Any]="absolute" , snake_case__ : Optional[Any]=True , snake_case__ : List[Any]=None , **snake_case__ : Tuple , ):
"""simple docstring"""
super().__init__(pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ , **snake_case__ )
SCREAMING_SNAKE_CASE = vocab_size
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = intermediate_size
SCREAMING_SNAKE_CASE = hidden_dropout_prob
SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE = max_position_embeddings
SCREAMING_SNAKE_CASE = type_vocab_size
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = layer_norm_eps
SCREAMING_SNAKE_CASE = position_embedding_type
SCREAMING_SNAKE_CASE = use_cache
SCREAMING_SNAKE_CASE = classifier_dropout
class RobertaPreLayerNormOnnxConfig ( OnnxConfig ):
@property
def UpperCamelCase ( self : str ):
"""simple docstring"""
if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
| 673 |
import numpy as np
def sigmoid ( vector : np.ndarray ) -> np.ndarray:
    '''simple docstring'''
    return 1 / (1 + np.exp(-vector ))
def sigmoid_linear_unit ( vector : np.ndarray ) -> np.ndarray:
    '''simple docstring'''
    return vector * sigmoid(vector )
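# Quick checks (values follow directly from the definitions above):
#
#   >>> sigmoid(np.array([0.0]))
#   array([0.5])
#   >>> sigmoid_linear_unit(np.array([0.0]))  # SiLU(0) = 0 * sigmoid(0) = 0
#   array([0.])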
if __name__ == "__main__":
import doctest
doctest.testmod()
| 673 | 1 |
from __future__ import annotations
from cmath import sqrt
def quadratic_roots ( a : int , b : int , c : int ) -> tuple[complex, complex]:
    '''simple docstring'''
    if a == 0:
        raise ValueError('Coefficient \'a\' must not be zero.' )
    delta = b * b - 4 * a * c
    root_1 = (-b + sqrt(delta )) / (2 * a)
    root_2 = (-b - sqrt(delta )) / (2 * a)
    return (
        root_1.real if not root_1.imag else root_1,
        root_2.real if not root_2.imag else root_2,
    )
def main() -> None:
    '''simple docstring'''
    solution1 , solution2 = quadratic_roots(a=5 , b=6 , c=1 )
    print(f"""The solutions are: {solution1} and {solution2}""" )
if __name__ == "__main__":
main()
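# For a=5, b=6, c=1 the discriminant is 36 - 20 = 16, so the roots printed by
# main() are (-6 + 4) / 10 = -0.2 and (-6 - 4) / 10 = -1.0.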
| 673 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
a_ : Any = logging.get_logger(__name__)
a_ : Dict = {
"Visual-Attention-Network/van-base": (
"https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json"
),
}
class VanConfig ( PretrainedConfig ):
__UpperCamelCase ="van"
def __init__( self : Optional[Any] , snake_case__ : Tuple=2_2_4 , snake_case__ : Dict=3 , snake_case__ : Union[str, Any]=[7, 3, 3, 3] , snake_case__ : str=[4, 2, 2, 2] , snake_case__ : Optional[Any]=[6_4, 1_2_8, 3_2_0, 5_1_2] , snake_case__ : Optional[Any]=[3, 3, 1_2, 3] , snake_case__ : Tuple=[8, 8, 4, 4] , snake_case__ : Any="gelu" , snake_case__ : Dict=0.02 , snake_case__ : List[str]=1E-6 , snake_case__ : int=1E-2 , snake_case__ : Any=0.0 , snake_case__ : Tuple=0.0 , **snake_case__ : Any , ):
"""simple docstring"""
super().__init__(**snake_case__ )
SCREAMING_SNAKE_CASE = image_size
SCREAMING_SNAKE_CASE = num_channels
SCREAMING_SNAKE_CASE = patch_sizes
SCREAMING_SNAKE_CASE = strides
SCREAMING_SNAKE_CASE = hidden_sizes
SCREAMING_SNAKE_CASE = depths
SCREAMING_SNAKE_CASE = mlp_ratios
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = layer_norm_eps
SCREAMING_SNAKE_CASE = layer_scale_init_value
SCREAMING_SNAKE_CASE = drop_path_rate
SCREAMING_SNAKE_CASE = dropout_rate
| 673 | 1 |
import torch
from diffusers import StableDiffusionPipeline
a_ : int = "path-to-your-trained-model"
a_ : List[str] = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.floataa).to("cuda")
a_ : Dict = "A photo of sks dog in a bucket"
a_ : Dict = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]
image.save("dog-bucket.png")
| 673 |
def create_ngram ( sentence : str , ngram_size : int ) -> list[str]:
    '''simple docstring'''
    return [sentence[i : i + ngram_size] for i in range(len(sentence ) - ngram_size + 1 )]
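# Example: character-level trigrams.
#
#   >>> create_ngram("abcde", 3)
#   ['abc', 'bcd', 'cde']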
if __name__ == "__main__":
from doctest import testmod
testmod()
| 673 | 1 |
import functools
import gc
import inspect
import torch
from .imports import is_npu_available, is_xpu_available
def release_memory ( *objects : int ) -> Tuple:
    '''simple docstring'''
    if not isinstance(objects , list ):
        objects = list(objects )
    for i in range(len(objects ) ):
        objects[i] = None
    gc.collect()
    if is_xpu_available():
        torch.xpu.empty_cache()
    elif is_npu_available():
        torch.npu.empty_cache()
    else:
        torch.cuda.empty_cache()
    return objects
def should_reduce_batch_size ( exception : Exception ) -> bool:
    '''simple docstring'''
    _statements = [
        'CUDA out of memory.', # CUDA OOM
        'cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.', # CUDNN SNAFU
        'DefaultCPUAllocator: can\'t allocate memory', # CPU OOM
    ]
    if isinstance(exception , RuntimeError ) and len(exception.args ) == 1:
        return any(err in exception.args[0] for err in _statements )
    return False
def find_executable_batch_size ( function : callable = None , starting_batch_size : int = 1_28 ) -> str:
    '''simple docstring'''
    if function is None:
        return functools.partial(find_executable_batch_size , starting_batch_size=starting_batch_size )
    batch_size = starting_batch_size
    def decorator(*args : Union[str, Any] , **kwargs : Tuple ):
        nonlocal batch_size
        gc.collect()
        if is_xpu_available():
            torch.xpu.empty_cache()
        elif is_npu_available():
            torch.npu.empty_cache()
        else:
            torch.cuda.empty_cache()
        params = list(inspect.signature(function ).parameters.keys() )
        # Guard against user error
        if len(params ) < (len(args ) + 1):
            arg_str = ', '.join([f"""{arg}={value}""" for arg, value in zip(params[1:] , args[1:] )] )
            raise TypeError(
                f"""Batch size was passed into `{function.__name__}` as the first argument when called."""
                f"""Remove this as the decorator already does so: `{function.__name__}({arg_str})`""" )
        while True:
            if batch_size == 0:
                raise RuntimeError('No executable batch size found, reached zero.' )
            try:
                return function(batch_size , *args , **kwargs )
            except Exception as e:
                if should_reduce_batch_size(e ):
                    gc.collect()
                    if is_xpu_available():
                        torch.xpu.empty_cache()
                    elif is_npu_available():
                        torch.npu.empty_cache()
                    else:
                        torch.cuda.empty_cache()
                    batch_size //= 2
                else:
                    raise
    return decorator
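# Hedged usage sketch (the training function is illustrative): the decorated
# function is retried with the batch size halved on every out-of-memory error.
#
#   @find_executable_batch_size(starting_batch_size=1_28)
#   def train(batch_size):
#       ...  # the first positional argument is injected by the decorator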
| 673 |
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
a_ : Dict = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class ZeroShotObjectDetectionPipeline ( ChunkPipeline ):
def __init__( self : Any , **snake_case__ : Optional[int] ):
"""simple docstring"""
super().__init__(**snake_case__ )
if self.framework == "tf":
raise ValueError(F"""The {self.__class__} is only available in PyTorch.""" )
requires_backends(self , 'vision' )
        self.check_model_type(MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING )
    def __call__( self : List[Any] , image : Union[str, "Image.Image", List[Dict[str, Any]]] , candidate_labels : Union[str, List[str]] = None , **kwargs : Union[str, Any] , ):
        """simple docstring"""
        if "text_queries" in kwargs:
            candidate_labels = kwargs.pop('text_queries' )
        if isinstance(image , (str, Image.Image) ):
            inputs = {'image': image, 'candidate_labels': candidate_labels}
        else:
            inputs = image
        results = super().__call__(inputs , **kwargs )
        return results
    def _sanitize_parameters ( self : Union[str, Any] , **kwargs : Optional[Any] ):
        """simple docstring"""
        postprocess_params = {}
        if "threshold" in kwargs:
            postprocess_params['threshold'] = kwargs['threshold']
        if "top_k" in kwargs:
            postprocess_params['top_k'] = kwargs['top_k']
return {}, {}, postprocess_params
    def preprocess ( self : List[Any] , inputs : Optional[int] ):
        """simple docstring"""
        image = load_image(inputs['image'] )
        candidate_labels = inputs['candidate_labels']
        if isinstance(candidate_labels , str ):
            candidate_labels = candidate_labels.split(',' )
        target_size = torch.tensor([[image.height, image.width]] , dtype=torch.int32 )
        for i, candidate_label in enumerate(candidate_labels ):
            text_inputs = self.tokenizer(candidate_label , return_tensors=self.framework )
            image_features = self.image_processor(image , return_tensors=self.framework )
            yield {
                "is_last": i == len(candidate_labels ) - 1,
                "target_size": target_size,
                "candidate_label": candidate_label,
                **text_inputs,
                **image_features,
            }
    def _forward ( self : Any , model_inputs : Dict ):
        """simple docstring"""
        target_size = model_inputs.pop('target_size' )
        candidate_label = model_inputs.pop('candidate_label' )
        is_last = model_inputs.pop('is_last' )
        outputs = self.model(**model_inputs )
        model_outputs = {'target_size': target_size, 'candidate_label': candidate_label, 'is_last': is_last, **outputs}
        return model_outputs
    def postprocess ( self : Union[str, Any] , model_outputs : Tuple , threshold : str=0.1 , top_k : Union[str, Any]=None ):
        """simple docstring"""
        results = []
        for model_output in model_outputs:
            label = model_output['candidate_label']
            model_output = BaseModelOutput(model_output )
            outputs = self.image_processor.post_process_object_detection(
                outputs=model_output , threshold=threshold , target_sizes=model_output['target_size'] )[0]
            for index in outputs["scores"].nonzero():
                score = outputs['scores'][index].item()
                box = self._get_bounding_box(outputs['boxes'][index][0] )
                result = {'score': score, 'label': label, 'box': box}
                results.append(result )
        results = sorted(results , key=lambda x : x["score"] , reverse=True )
        if top_k:
            results = results[:top_k]
        return results
def UpperCamelCase ( self : List[Any] , snake_case__ : "torch.Tensor" ):
"""simple docstring"""
if self.framework != "pt":
raise ValueError('The ZeroShotObjectDetectionPipeline is only available in PyTorch.' )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = box.int().tolist()
SCREAMING_SNAKE_CASE = {
'xmin': xmin,
'ymin': ymin,
'xmax': xmax,
'ymax': ymax,
}
return bbox
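# Hedged usage sketch (checkpoint and labels are illustrative):
#
#   from transformers import pipeline
#   detector = pipeline(task="zero-shot-object-detection", model="google/owlvit-base-patch32")
#   detector(
#       "http://images.cocodataset.org/val2017/000000039769.jpg",
#       candidate_labels=["cat", "remote control"],
#   )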
| 673 | 1 |
def miller_rabin ( n : int , allow_probable : bool = False ) -> bool:
'''simple docstring'''
if n == 2:
return True
if not n % 2 or n < 2:
return False
if n > 5 and n % 10 not in (1, 3, 7, 9): # can quickly check last digit
return False
if n > 3_31_70_44_06_46_79_88_73_85_96_19_81 and not allow_probable:
raise ValueError(
'Warning: upper bound of deterministic test is exceeded. '
'Pass allow_probable=True to allow probabilistic test. '
'A return value of True indicates a probable prime.' )
# array bounds provided by analysis
    bounds = [
20_47,
1_37_36_53,
25_32_60_01,
32_15_03_17_51,
2_15_23_02_89_87_47,
3_47_47_49_66_03_83,
3_41_55_00_71_72_83_21,
1,
3_82_51_23_05_65_46_41_30_51,
1,
1,
31_86_65_85_78_34_03_11_51_16_74_61,
3_31_70_44_06_46_79_88_73_85_96_19_81,
]
    primes = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41]
    for idx, _p in enumerate(bounds , 1 ):
if n < _p:
# then we have our last prime to check
            plist = primes[:idx]
break
    d , s = n - 1, 0
# break up n -1 into a power of 2 (s) and
# remaining odd component
# essentially, solve for d * 2 ** s == n - 1
while d % 2 == 0:
d //= 2
s += 1
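    # e.g. for n = 221: n - 1 = 220 = 55 * 2**2, so d = 55 and s = 2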
for prime in plist:
        pr = False
        for r in range(s ):
            m = pow(prime , d * 2**r , n )
            # see article for analysis explanation for m
            if (r == 0 and m == 1) or ((m + 1) % n == 0):
                pr = True
# this loop will not determine compositeness
break
if pr:
continue
# if pr is False, then the above loop never evaluated to true,
# and the n MUST be composite
return False
return True
def test_miller_rabin ( ) -> None:
'''simple docstring'''
assert not miller_rabin(5_61 )
assert miller_rabin(5_63 )
# 2047
assert not miller_rabin(83_82_01 )
assert miller_rabin(83_82_07 )
# 1_373_653
assert not miller_rabin(17_31_60_01 )
assert miller_rabin(17_31_60_17 )
# 25_326_001
assert not miller_rabin(30_78_38_66_41 )
assert miller_rabin(30_78_38_66_53 )
# 3_215_031_751
assert not miller_rabin(1_71_30_45_57_48_01 )
assert miller_rabin(1_71_30_45_57_48_19 )
# 2_152_302_898_747
assert not miller_rabin(2_77_97_99_72_83_07 )
assert miller_rabin(2_77_97_99_72_83_27 )
# 3_474_749_660_383
assert not miller_rabin(1_13_85_00_23_90_94_41 )
assert miller_rabin(1_13_85_00_23_90_95_27 )
# 341_550_071_728_321
assert not miller_rabin(1_27_50_41_01_88_48_80_43_51 )
assert miller_rabin(1_27_50_41_01_88_48_80_43_91 )
# 3_825_123_056_546_413_051
assert not miller_rabin(7_96_66_46_44_58_50_77_87_79_18_67 )
assert miller_rabin(7_96_66_46_44_58_50_77_87_79_19_51 )
# 318_665_857_834_031_151_167_461
assert not miller_rabin(55_28_40_67_74_46_64_78_97_66_03_33 )
assert miller_rabin(55_28_40_67_74_46_64_78_97_66_03_59 )
# 3_317_044_064_679_887_385_961_981
# upper limit for probabilistic test
if __name__ == "__main__":
test_miller_rabin()
| 673 |
def solution ( power : int = 10_00 ) -> int:
    '''simple docstring'''
    num = 2**power
    string_num = str(num )
    list_num = list(string_num )
    sum_of_num = 0
    for i in list_num:
        sum_of_num += int(i )
    return sum_of_num
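# Example: solution(15) computes 2**15 = 32768 and returns 3 + 2 + 7 + 6 + 8 = 26.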
if __name__ == "__main__":
a_ : List[str] = int(input("Enter the power of 2: ").strip())
print("2 ^ ", power, " = ", 2**power)
a_ : int = solution(power)
print("Sum of the digits is: ", result)
| 673 | 1 |
from typing import Dict, Optional
import numpy as np
import datasets
a_ : List[Any] = "\nIoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union\nbetween the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,\nthe mean IoU of the image is calculated by taking the IoU of each class and averaging them.\n"
a_ : List[Any] = "\nArgs:\n predictions (`List[ndarray]`):\n List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n references (`List[ndarray]`):\n List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n num_labels (`int`):\n Number of classes (categories).\n ignore_index (`int`):\n Index that will be ignored during evaluation.\n nan_to_num (`int`, *optional*):\n If specified, NaN values will be replaced by the number defined by the user.\n label_map (`dict`, *optional*):\n If specified, dictionary mapping old label indices to new label indices.\n reduce_labels (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,\n and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.\n\nReturns:\n `Dict[str, float | ndarray]` comprising various elements:\n - *mean_iou* (`float`):\n Mean Intersection-over-Union (IoU averaged over all categories).\n - *mean_accuracy* (`float`):\n Mean accuracy (averaged over all categories).\n - *overall_accuracy* (`float`):\n Overall accuracy on all images.\n - *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):\n Per category accuracy.\n - *per_category_iou* (`ndarray` of shape `(num_labels,)`):\n Per category IoU.\n\nExamples:\n\n >>> import numpy as np\n\n >>> mean_iou = datasets.load_metric(\"mean_iou\")\n\n >>> # suppose one has 3 different segmentation maps predicted\n >>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])\n >>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])\n\n >>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])\n >>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])\n\n >>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])\n >>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])\n\n >>> predicted = [predicted_1, predicted_2, predicted_3]\n >>> ground_truth = [actual_1, actual_2, actual_3]\n\n >>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {'mean_iou': 0.47750000000000004, 'mean_accuracy': 0.5916666666666666, 'overall_accuracy': 0.5263157894736842, 'per_category_iou': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), 'per_category_accuracy': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. ])}\n"
a_ : str = "\\n@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,\nauthor = {{MMSegmentation Contributors}},\nlicense = {Apache-2.0},\nmonth = {7},\ntitle = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},\nurl = {https://github.com/open-mmlab/mmsegmentation},\nyear = {2020}\n}"
def intersect_and_union ( pred_label : Dict , label : Union[str, Any] , num_labels : Union[str, Any] , ignore_index : bool , label_map : Optional[Dict[int, int]] = None , reduce_labels : bool = False , ) -> int:
    '''simple docstring'''
    if label_map is not None:
        for old_id, new_id in label_map.items():
            label[label == old_id] = new_id
    # turn into Numpy arrays
    pred_label = np.array(pred_label )
    label = np.array(label )
    if reduce_labels:
        label[label == 0] = 2_55
        label = label - 1
        label[label == 2_54] = 2_55
    mask = label != ignore_index
    mask = np.not_equal(label , ignore_index )
    pred_label = pred_label[mask]
    label = np.array(label )[mask]
    intersect = pred_label[pred_label == label]
    area_intersect = np.histogram(intersect , bins=num_labels , range=(0, num_labels - 1) )[0]
    area_pred_label = np.histogram(pred_label , bins=num_labels , range=(0, num_labels - 1) )[0]
    area_label = np.histogram(label , bins=num_labels , range=(0, num_labels - 1) )[0]
    area_union = area_pred_label + area_label - area_intersect
    return area_intersect, area_union, area_pred_label, area_label
def total_intersect_and_union ( results : int , gt_seg_maps : int , num_labels : Any , ignore_index : bool , label_map : Optional[Dict[int, int]] = None , reduce_labels : bool = False , ) -> Optional[Any]:
    '''simple docstring'''
    total_area_intersect = np.zeros((num_labels,) , dtype=np.float64 )
    total_area_union = np.zeros((num_labels,) , dtype=np.float64 )
    total_area_pred_label = np.zeros((num_labels,) , dtype=np.float64 )
    total_area_label = np.zeros((num_labels,) , dtype=np.float64 )
    for result, gt_seg_map in zip(results , gt_seg_maps ):
        area_intersect , area_union , area_pred_label , area_label = intersect_and_union(
            result , gt_seg_map , num_labels , ignore_index , label_map , reduce_labels )
        total_area_intersect += area_intersect
        total_area_union += area_union
        total_area_pred_label += area_pred_label
        total_area_label += area_label
    return total_area_intersect, total_area_union, total_area_pred_label, total_area_label
def mean_iou ( results : Tuple , gt_seg_maps : List[str] , num_labels : str , ignore_index : bool , nan_to_num : Optional[int] = None , label_map : Optional[Dict[int, int]] = None , reduce_labels : bool = False , ) -> Dict:
    '''simple docstring'''
    total_area_intersect , total_area_union , total_area_pred_label , total_area_label = total_intersect_and_union(
        results , gt_seg_maps , num_labels , ignore_index , label_map , reduce_labels )
    # compute metrics
    metrics = {}
    all_acc = total_area_intersect.sum() / total_area_label.sum()
    iou = total_area_intersect / total_area_union
    acc = total_area_intersect / total_area_label
    metrics['mean_iou'] = np.nanmean(iou )
    metrics['mean_accuracy'] = np.nanmean(acc )
    metrics['overall_accuracy'] = all_acc
    metrics['per_category_iou'] = iou
    metrics['per_category_accuracy'] = acc
    if nan_to_num is not None:
        metrics = {metric: np.nan_to_num(metric_value , nan=nan_to_num ) for metric, metric_value in metrics.items()}
    return metrics
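# Worked example of the per-class ratio computed above: a class with 30
# intersecting pixels and a 50-pixel union has IoU 30 / 50 = 0.6; np.nanmean then
# averages these per-class ratios so classes absent from both maps (NaN) are ignored.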
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCamelCase ( datasets.Metric ):
    def _info ( self : str ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
# 1st Seq - height dim, 2nd - width dim
{
'predictions': datasets.Sequence(datasets.Sequence(datasets.Value('uint16' ) ) ),
'references': datasets.Sequence(datasets.Sequence(datasets.Value('uint16' ) ) ),
} ) , reference_urls=[
'https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py'
] , )
    def _compute ( self : List[Any] , predictions : Union[str, Any] , references : Optional[int] , num_labels : int , ignore_index : bool , nan_to_num : Optional[int] = None , label_map : Optional[Dict[int, int]] = None , reduce_labels : bool = False , ):
        """simple docstring"""
        iou_result = mean_iou(
            results=predictions , gt_seg_maps=references , num_labels=num_labels , ignore_index=ignore_index , nan_to_num=nan_to_num , label_map=label_map , reduce_labels=reduce_labels , )
        return iou_result
| 673 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class TextClassificationTool ( PipelineTool ):
__UpperCamelCase ="facebook/bart-large-mnli"
__UpperCamelCase =(
"This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
"should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
"It returns the most likely label in the list of provided `labels` for the input text."
)
__UpperCamelCase ="text_classifier"
__UpperCamelCase =AutoTokenizer
__UpperCamelCase =AutoModelForSequenceClassification
__UpperCamelCase =["text", ["text"]]
__UpperCamelCase =["text"]
    def setup ( self : Optional[Any] ):
        """simple docstring"""
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith('entail' ):
                self.entailment_id = int(idx )
if self.entailment_id == -1:
raise ValueError('Could not determine the entailment ID from the model config, please pass it at init.' )
    def encode ( self : Optional[Any] , text : List[str] , labels : Dict ):
        """simple docstring"""
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels ) , [F"""This example is {label}""" for label in labels] , return_tensors='pt' , padding='max_length' , )
    def decode ( self : Dict , outputs : Dict ):
        """simple docstring"""
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2] ).item()
return self._labels[label_id]
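# Hedged usage sketch (inputs are illustrative):
#
#   classifier = TextClassificationTool()
#   label = classifier("This is a super nice API!", labels=["positive", "negative"])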
| 673 | 1 |
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
a_ : List[Any] = "src/diffusers"
a_ : Any = "."
# This is to make sure the diffusers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
"diffusers",
os.path.join(DIFFUSERS_PATH, "__init__.py"),
submodule_search_locations=[DIFFUSERS_PATH],
)
a_ : Union[str, Any] = spec.loader.load_module()
def _should_continue ( line : Optional[int] , indent : Dict ) -> Optional[Any]:
    '''simple docstring'''
    return line.startswith(indent ) or len(line ) <= 1 or re.search(R'^\s*\)(\s*->.*:|:)\s*$' , line ) is not None
def find_code_in_diffusers ( object_name : Tuple ) -> int:
    '''simple docstring'''
    parts = object_name.split('.' )
    i = 0
    # First let's find the module where our object lives.
    module = parts[i]
    while i < len(parts ) and not os.path.isfile(os.path.join(DIFFUSERS_PATH , f"""{module}.py""" ) ):
        i += 1
        if i < len(parts ):
            module = os.path.join(module , parts[i] )
    if i >= len(parts ):
        raise ValueError(f"""`object_name` should begin with the name of a module of diffusers but got {object_name}.""" )
    with open(os.path.join(DIFFUSERS_PATH , f"""{module}.py""" ) , 'r' , encoding='utf-8' , newline='\n' ) as f:
        lines = f.readlines()
    # Now let's find the class / func in the code!
    indent = ''
    line_index = 0
    for name in parts[i + 1 :]:
        while (
            line_index < len(lines ) and re.search(Rf"""^{indent}(class|def)\s+{name}(\(|\:)""" , lines[line_index] ) is None
        ):
            line_index += 1
        indent += "    "
        line_index += 1
    if line_index >= len(lines ):
        raise ValueError(f""" {object_name} does not match any function or class in {module}.""" )
    # We found the beginning of the class / func, now let's find the end (when the indent diminishes).
    start_index = line_index
    while line_index < len(lines ) and _should_continue(lines[line_index] , indent ):
        line_index += 1
    # Clean up empty lines at the end (if any).
    while len(lines[line_index - 1] ) <= 1:
        line_index -= 1
    code_lines = lines[start_index:line_index]
    return "".join(code_lines )
a_ : Optional[int] = re.compile(R"^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)")
a_ : Union[str, Any] = re.compile(R"^\s*(\S+)->(\S+)(\s+.*|$)")
a_ : Optional[Any] = re.compile(R"<FILL\s+[^>]*>")
def get_indent ( code : Union[str, Any] ) -> Any:
    '''simple docstring'''
    lines = code.split('\n' )
    idx = 0
    while idx < len(lines ) and len(lines[idx] ) == 0:
        idx += 1
    if idx < len(lines ):
        return re.search(R'^(\s*)\S' , lines[idx] ).groups()[0]
    return ""
def blackify ( code : str ) -> Dict:
    '''simple docstring'''
    has_indent = len(get_indent(code ) ) > 0
    if has_indent:
        code = f"""class Bla:\n{code}"""
    mode = black.Mode(target_versions={black.TargetVersion.PY37} , line_length=1_19 , preview=True )
    result = black.format_str(code , mode=mode )
    result , _ = style_docstrings_in_code(result )
    return result[len('class Bla:\n' ) :] if has_indent else result
def is_copy_consistent ( filename : Dict , overwrite : Optional[Any]=False ) -> Optional[Any]:
    '''simple docstring'''
    with open(filename , 'r' , encoding='utf-8' , newline='\n' ) as f:
        lines = f.readlines()
    diffs = []
    line_index = 0
    # Not a for loop cause `lines` is going to change (if `overwrite=True`).
    while line_index < len(lines ):
        search = _re_copy_warning.search(lines[line_index] )
        if search is None:
            line_index += 1
            continue
        # There is some copied code here, let's retrieve the original.
        indent , object_name , replace_pattern = search.groups()
        theoretical_code = find_code_in_diffusers(object_name )
        theoretical_indent = get_indent(theoretical_code )
        start_index = line_index + 1 if indent == theoretical_indent else line_index + 2
        indent = theoretical_indent
        line_index = start_index
        # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
        should_continue = True
        while line_index < len(lines ) and should_continue:
            line_index += 1
            if line_index >= len(lines ):
                break
            line = lines[line_index]
            should_continue = _should_continue(line , indent ) and re.search(f"""^{indent}# End copy""" , line ) is None
        # Clean up empty lines at the end (if any).
        while len(lines[line_index - 1] ) <= 1:
            line_index -= 1
        observed_code_lines = lines[start_index:line_index]
        observed_code = ''.join(observed_code_lines )
        # Remove any nested `Copied from` comments to avoid circular copies
        theoretical_code = [line for line in theoretical_code.split('\n' ) if _re_copy_warning.search(line ) is None]
        theoretical_code = '\n'.join(theoretical_code )
        # Before comparing, use the `replace_pattern` on the original code.
        if len(replace_pattern ) > 0:
            patterns = replace_pattern.replace('with' , '' ).split(',' )
            patterns = [_re_replace_pattern.search(p ) for p in patterns]
            for pattern in patterns:
                if pattern is None:
                    continue
                obj1 , obj2 , option = pattern.groups()
                theoretical_code = re.sub(obj1 , obj2 , theoretical_code )
                if option.strip() == "all-casing":
                    theoretical_code = re.sub(obj1.lower() , obj2.lower() , theoretical_code )
                    theoretical_code = re.sub(obj1.upper() , obj2.upper() , theoretical_code )
        # Blackify after replacement. To be able to do that, we need the header (class or function definition)
        # from the previous line
        theoretical_code = blackify(lines[start_index - 1] + theoretical_code )
        theoretical_code = theoretical_code[len(lines[start_index - 1] ) :]
        # Test for a diff and act accordingly.
        if observed_code != theoretical_code:
            diffs.append([object_name, start_index] )
            if overwrite:
                lines = lines[:start_index] + [theoretical_code] + lines[line_index:]
                line_index = start_index + 1
    if overwrite and len(diffs ) > 0:
        # Warn the user a file has been modified.
        print(f"""Detected changes, rewriting {filename}.""" )
        with open(filename , 'w' , encoding='utf-8' , newline='\n' ) as f:
            f.writelines(lines )
    return diffs
def check_copies ( overwrite : bool = False ) -> Dict:
    '''simple docstring'''
    all_files = glob.glob(os.path.join(REPO_PATH , '**/*.py' ) , recursive=True )
    diffs = []
    for filename in all_files:
        new_diffs = is_copy_consistent(filename , overwrite )
        diffs += [f"""- {filename}: copy does not match {d[0]} at line {d[1]}""" for d in new_diffs]
    if not overwrite and len(diffs ) > 0:
        diff = '\n'.join(diffs )
        raise Exception(
            'Found the following copy inconsistencies:\n'
            + diff
            + '\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them.' )
if __name__ == "__main__":
a_ : Optional[int] = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
a_ : Optional[Any] = parser.parse_args()
check_copies(args.fix_and_overwrite)
| 673 |
# Note: if you intend to run this script make sure you look under scripts/fsmt/
# to locate the appropriate script to do the work correctly. There is a set of scripts to:
# - download and prepare data and run the conversion script
# - perform eval to get the best hparam into the config
# - generate model_cards - useful if you have multiple models from the same paper
import argparse
import json
import os
import re
from collections import OrderedDict
from os.path import basename, dirname
import fairseq
import torch
from fairseq import hub_utils
from fairseq.data.dictionary import Dictionary
from transformers import FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
a_ : str = 2
# based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping`
# values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults:
#
# * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users)
# * `early_stopping`: `False` consistently scored better
# * `length_penalty` varied, so will assign the best one depending on the model
best_score_hparams = {
# fairseq:
"wmt19-ru-en": {"length_penalty": 1.1},
"wmt19-en-ru": {"length_penalty": 1.1_5},
"wmt19-en-de": {"length_penalty": 1.0},
"wmt19-de-en": {"length_penalty": 1.1},
# allenai:
"wmt16-en-de-dist-12-1": {"length_penalty": 0.6},
"wmt16-en-de-dist-6-1": {"length_penalty": 0.6},
"wmt16-en-de-12-1": {"length_penalty": 0.8},
"wmt19-de-en-6-6-base": {"length_penalty": 0.6},
"wmt19-de-en-6-6-big": {"length_penalty": 0.6},
}
# this remaps the different models to their organization names
org_names = {}
for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
a_ : List[Any] = "facebook"
for m in [
"wmt16-en-de-dist-12-1",
"wmt16-en-de-dist-6-1",
"wmt16-en-de-12-1",
"wmt19-de-en-6-6-base",
"wmt19-de-en-6-6-big",
]:
a_ : Any = "allenai"
def rewrite_dict_keys ( d : List[Any] ) -> Any:
    '''simple docstring'''
    da = dict((re.sub(R'@@$' , '' , k ), v) if k.endswith('@@' ) else (re.sub(R'$' , '</w>' , k ), v) for k, v in d.items() )
    keep_keys = '<s> <pad> </s> <unk>'.split()
    # restore the special tokens
    for k in keep_keys:
        del da[f"""{k}</w>"""]
        da[k] = d[k] # restore
    return da
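# e.g. {"lov@@": 7, "walk": 9} becomes {"lov": 7, "walk</w>": 9}: BPE continuation
# markers ("@@") are stripped and full words get a "</w>" end-of-word marker.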
def __lowerCAmelCase ( _UpperCamelCase : Dict , _UpperCamelCase : List[str] ) -> Dict:
'''simple docstring'''
assert os.path.exists(_UpperCamelCase )
os.makedirs(_UpperCamelCase , exist_ok=_UpperCamelCase )
print(f"""Writing results to {pytorch_dump_folder_path}""" )
# handle various types of models
SCREAMING_SNAKE_CASE = basename(_UpperCamelCase )
SCREAMING_SNAKE_CASE = dirname(_UpperCamelCase )
SCREAMING_SNAKE_CASE = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel
SCREAMING_SNAKE_CASE = cls.hub_models()
SCREAMING_SNAKE_CASE = {'bpe': 'fastbpe', 'tokenizer': 'moses'}
SCREAMING_SNAKE_CASE = '.'
# note: since the model dump is old, fairseq has upgraded its model some
# time later, and it does a whole lot of rewrites and splits on the saved
# weights, therefore we can't use torch.load() directly on the model file.
# see: upgrade_state_dict(state_dict) in fairseq_model.py
print(f"""using checkpoint {checkpoint_file}""" )
SCREAMING_SNAKE_CASE = hub_utils.from_pretrained(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , archive_map=_UpperCamelCase , **_UpperCamelCase )
SCREAMING_SNAKE_CASE = vars(chkpt['args']['model'] )
SCREAMING_SNAKE_CASE = args['source_lang']
SCREAMING_SNAKE_CASE = args['target_lang']
SCREAMING_SNAKE_CASE = dirname(_UpperCamelCase )
SCREAMING_SNAKE_CASE = basename(_UpperCamelCase )
# dicts
SCREAMING_SNAKE_CASE = os.path.join(_UpperCamelCase , f"""dict.{src_lang}.txt""" )
SCREAMING_SNAKE_CASE = os.path.join(_UpperCamelCase , f"""dict.{tgt_lang}.txt""" )
SCREAMING_SNAKE_CASE = Dictionary.load(_UpperCamelCase )
SCREAMING_SNAKE_CASE = rewrite_dict_keys(src_dict.indices )
SCREAMING_SNAKE_CASE = len(_UpperCamelCase )
SCREAMING_SNAKE_CASE = os.path.join(_UpperCamelCase , 'vocab-src.json' )
print(f"""Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records""" )
with open(_UpperCamelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(_UpperCamelCase , ensure_ascii=_UpperCamelCase , indent=_UpperCamelCase ) )
# detect whether this is a do_lower_case situation, which can be derived by checking whether we
# have at least one uppercase letter in the source vocab
SCREAMING_SNAKE_CASE = True
for k in src_vocab.keys():
if not k.islower():
SCREAMING_SNAKE_CASE = False
break
SCREAMING_SNAKE_CASE = Dictionary.load(_UpperCamelCase )
SCREAMING_SNAKE_CASE = rewrite_dict_keys(tgt_dict.indices )
SCREAMING_SNAKE_CASE = len(_UpperCamelCase )
SCREAMING_SNAKE_CASE = os.path.join(_UpperCamelCase , 'vocab-tgt.json' )
print(f"""Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records""" )
with open(_UpperCamelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(_UpperCamelCase , ensure_ascii=_UpperCamelCase , indent=_UpperCamelCase ) )
# merges_file (bpecodes)
SCREAMING_SNAKE_CASE = os.path.join(_UpperCamelCase , VOCAB_FILES_NAMES['merges_file'] )
for fn in ["bpecodes", "code"]: # older fairseq called the merges file "code"
SCREAMING_SNAKE_CASE = os.path.join(_UpperCamelCase , _UpperCamelCase )
if os.path.exists(_UpperCamelCase ):
break
with open(_UpperCamelCase , encoding='utf-8' ) as fin:
SCREAMING_SNAKE_CASE = fin.read()
SCREAMING_SNAKE_CASE = re.sub(R' \d+$' , '' , _UpperCamelCase , 0 , re.M ) # remove frequency number
print(f"""Generating {merges_file}""" )
with open(_UpperCamelCase , 'w' , encoding='utf-8' ) as fout:
fout.write(_UpperCamelCase )
# model config
SCREAMING_SNAKE_CASE = os.path.join(_UpperCamelCase , 'config.json' )
# validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe -
# may have to modify the tokenizer if a different type is used by a future model
assert args["bpe"] == "fastbpe", f"""need to extend tokenizer to support bpe={args['bpe']}"""
assert args["tokenizer"] == "moses", f"""need to extend tokenizer to support bpe={args['tokenizer']}"""
SCREAMING_SNAKE_CASE = {
'architectures': ['FSMTForConditionalGeneration'],
'model_type': 'fsmt',
'activation_dropout': args['activation_dropout'],
'activation_function': 'relu',
'attention_dropout': args['attention_dropout'],
'd_model': args['decoder_embed_dim'],
'dropout': args['dropout'],
'init_std': 0.02,
'max_position_embeddings': args['max_source_positions'],
'num_hidden_layers': args['encoder_layers'],
'src_vocab_size': src_vocab_size,
'tgt_vocab_size': tgt_vocab_size,
'langs': [src_lang, tgt_lang],
'encoder_attention_heads': args['encoder_attention_heads'],
'encoder_ffn_dim': args['encoder_ffn_embed_dim'],
'encoder_layerdrop': args['encoder_layerdrop'],
'encoder_layers': args['encoder_layers'],
'decoder_attention_heads': args['decoder_attention_heads'],
'decoder_ffn_dim': args['decoder_ffn_embed_dim'],
'decoder_layerdrop': args['decoder_layerdrop'],
'decoder_layers': args['decoder_layers'],
'bos_token_id': 0,
'pad_token_id': 1,
'eos_token_id': 2,
'is_encoder_decoder': True,
'scale_embedding': not args['no_scale_embedding'],
'tie_word_embeddings': args['share_all_embeddings'],
}
# good hparam defaults to start with
SCREAMING_SNAKE_CASE = 5
SCREAMING_SNAKE_CASE = False
if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]:
SCREAMING_SNAKE_CASE = best_score_hparams[model_dir]['length_penalty']
else:
SCREAMING_SNAKE_CASE = 1.0
print(f"""Generating {fsmt_model_config_file}""" )
with open(_UpperCamelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(_UpperCamelCase , ensure_ascii=_UpperCamelCase , indent=_UpperCamelCase ) )
# tokenizer config
SCREAMING_SNAKE_CASE = os.path.join(_UpperCamelCase , _UpperCamelCase )
SCREAMING_SNAKE_CASE = {
'langs': [src_lang, tgt_lang],
'model_max_length': 10_24,
'do_lower_case': do_lower_case,
}
print(f"""Generating {fsmt_tokenizer_config_file}""" )
with open(_UpperCamelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(_UpperCamelCase , ensure_ascii=_UpperCamelCase , indent=_UpperCamelCase ) )
# model
SCREAMING_SNAKE_CASE = chkpt['models'][0]
SCREAMING_SNAKE_CASE = model.state_dict()
# rename keys to start with 'model.'
SCREAMING_SNAKE_CASE = OrderedDict(('model.' + k, v) for k, v in model_state_dict.items() )
# remove unneeded keys
SCREAMING_SNAKE_CASE = [
'model.model',
'model.encoder.version',
'model.decoder.version',
'model.encoder_embed_tokens.weight',
'model.decoder_embed_tokens.weight',
'model.encoder.embed_positions._float_tensor',
'model.decoder.embed_positions._float_tensor',
]
for k in ignore_keys:
model_state_dict.pop(_UpperCamelCase , _UpperCamelCase )
SCREAMING_SNAKE_CASE = FSMTConfig.from_pretrained(_UpperCamelCase )
SCREAMING_SNAKE_CASE = FSMTForConditionalGeneration(_UpperCamelCase )
# check that it loads ok
model_new.load_state_dict(_UpperCamelCase , strict=_UpperCamelCase )
# save
SCREAMING_SNAKE_CASE = os.path.join(_UpperCamelCase , _UpperCamelCase )
print(f"""Generating {pytorch_weights_dump_path}""" )
torch.save(_UpperCamelCase , _UpperCamelCase )
print('Conversion is done!' )
print('\nLast step is to upload the files to s3' )
print(f"""cd {data_root}""" )
print(f"""transformers-cli upload {model_dir}""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--fsmt_checkpoint_path",
default=None,
type=str,
required=True,
help=(
"Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"
" bpecodes, etc."
),
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
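    # Example invocation (paths are illustrative):
    #   python convert_fsmt_original_pytorch_checkpoint_to_pytorch.py \
    #       --fsmt_checkpoint_path data/wmt19-ru-en/model4.pt \
    #       --pytorch_dump_folder_path dump/wmt19-ru-en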
| 673 | 1 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def test_command_parser(subparsers=None):
    '''simple docstring'''
    if subparsers is not None:
        parser = subparsers.add_parser('test')
    else:
        parser = argparse.ArgumentParser('Accelerate test command')
    parser.add_argument(
        '--config_file', default=None, help=(
            'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
            'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
            'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
            'with \'huggingface\'.'
        ), )
    if subparsers is not None:
        parser.set_defaults(func=test_command)
    return parser


def test_command(args):
    '''simple docstring'''
    script_name = os.path.sep.join(__file__.split(os.path.sep)[:-2] + ['test_utils', 'scripts', 'test_script.py'])
    if args.config_file is None:
        test_args = script_name
    else:
        test_args = f"""--config_file={args.config_file} {script_name}"""
    cmd = ['accelerate-launch'] + test_args.split()
    result = execute_subprocess_async(cmd, env=os.environ.copy())
    if result.returncode == 0:
        print('Test is a success! You are ready for your distributed training!')


def main():
    '''simple docstring'''
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args)
if __name__ == "__main__":
main()
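    # Typical CLI usage once accelerate is installed (the `accelerate` entry
    # point dispatches to main()):  accelerate test --config_file default_config.yaml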
| 673 |
import random
def random_graph(vertices_number: int, probability: float, directed: bool = False) -> dict:
    '''simple docstring'''
    graph = {i: [] for i in range(vertices_number)}
    # if probability is greater than or equal to 1, generate a complete graph
    if probability >= 1:
        return complete_graph(vertices_number)
    # if probability is lower than or equal to 0, return a graph without edges
    if probability <= 0:
        return graph
    # for each couple of nodes, add an edge from u to v
    # if the randomly generated number is lower than probability
    for i in range(vertices_number):
        for j in range(i + 1, vertices_number):
            if random.random() < probability:
                graph[i].append(j)
                if not directed:
                    # if the graph is undirected, also add an edge from j to i
                    graph[j].append(i)
    return graph


def complete_graph(vertices_number: int) -> dict:
    '''simple docstring'''
    return {
        i: [j for j in range(vertices_number) if i != j] for i in range(vertices_number)
    }
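# Example: complete_graph(3) returns {0: [1, 2], 1: [0, 2], 2: [0, 1]}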
if __name__ == "__main__":
import doctest
doctest.testmod()
| 673 | 1 |
import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.distilbert.modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
)
class FlaxDistilBertModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])
        config = DistilBertConfig(
            vocab_size=self.vocab_size, dim=self.hidden_size, n_layers=self.num_hidden_layers, n_heads=self.num_attention_heads, hidden_dim=self.intermediate_size, hidden_act=self.hidden_act, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, tie_weights_=True, )
        return config, input_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': attention_mask}
        return config, inputs_dict
@require_flax
class FlaxDistilBertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxDistilBertModel,
            FlaxDistilBertForMaskedLM,
            FlaxDistilBertForMultipleChoice,
            FlaxDistilBertForQuestionAnswering,
            FlaxDistilBertForSequenceClassification,
            FlaxDistilBertForTokenClassification,
            FlaxDistilBertForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        """simple docstring"""
        self.model_tester = FlaxDistilBertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        """simple docstring"""
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('distilbert-base-uncased')
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxDistilBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        """simple docstring"""
        model = FlaxDistilBertModel.from_pretrained('distilbert-base-uncased')
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = np.array([[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]])
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4], expected_slice, atol=1E-4))
| 673 |
import unittest
from transformers import AutoTokenizer, NystromformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
NystromformerModel,
)
from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
class NystromformerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        """simple docstring"""
        return NystromformerConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )
def UpperCamelCase ( self : List[str] , snake_case__ : Any , snake_case__ : Tuple , snake_case__ : Union[str, Any] , snake_case__ : Dict , snake_case__ : List[str] , snake_case__ : int , snake_case__ : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = NystromformerModel(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ )
SCREAMING_SNAKE_CASE = model(snake_case__ , token_type_ids=snake_case__ )
SCREAMING_SNAKE_CASE = model(snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase ( self : List[Any] , snake_case__ : str , snake_case__ : Dict , snake_case__ : Dict , snake_case__ : Dict , snake_case__ : int , snake_case__ : int , snake_case__ : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = NystromformerForMaskedLM(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase ( self : Union[str, Any] , snake_case__ : Tuple , snake_case__ : Optional[Any] , snake_case__ : int , snake_case__ : Optional[int] , snake_case__ : str , snake_case__ : List[str] , snake_case__ : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = NystromformerForQuestionAnswering(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(
snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCamelCase ( self : List[str] , snake_case__ : int , snake_case__ : int , snake_case__ : int , snake_case__ : str , snake_case__ : Optional[int] , snake_case__ : Any , snake_case__ : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.num_labels
SCREAMING_SNAKE_CASE = NystromformerForSequenceClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase ( self : Tuple , snake_case__ : int , snake_case__ : Tuple , snake_case__ : List[str] , snake_case__ : List[Any] , snake_case__ : Optional[Any] , snake_case__ : List[Any] , snake_case__ : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.num_labels
SCREAMING_SNAKE_CASE = NystromformerForTokenClassification(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase ( self : int , snake_case__ : Optional[Any] , snake_case__ : List[str] , snake_case__ : Union[str, Any] , snake_case__ : Tuple , snake_case__ : Optional[int] , snake_case__ : Any , snake_case__ : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.num_choices
SCREAMING_SNAKE_CASE = NystromformerForMultipleChoice(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE = model(
snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common(self):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class NystromformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            NystromformerModel,
            NystromformerForMaskedLM,
            NystromformerForMultipleChoice,
            NystromformerForQuestionAnswering,
            NystromformerForSequenceClassification,
            NystromformerForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": NystromformerModel,
            "fill-mask": NystromformerForMaskedLM,
            "question-answering": NystromformerForQuestionAnswering,
            "text-classification": NystromformerForSequenceClassification,
            "token-classification": NystromformerForTokenClassification,
            "zero-shot": NystromformerForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_headmasking = False

    def setUp(self):
        """simple docstring"""
        self.model_tester = NystromformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=NystromformerConfig, hidden_size=37)
def UpperCamelCase ( self : List[str] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case__ )
def UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
SCREAMING_SNAKE_CASE = type
self.model_tester.create_and_check_model(*snake_case__ )
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*snake_case__ )
def UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*snake_case__ )
def UpperCamelCase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*snake_case__ )
def UpperCamelCase ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*snake_case__ )
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*snake_case__ )
@slow
def UpperCamelCase ( self : List[str] ):
"""simple docstring"""
for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE = NystromformerModel.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
@require_torch
class UpperCamelCase ( unittest.TestCase ):
@slow
def UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = NystromformerModel.from_pretrained('uw-madison/nystromformer-512' )
SCREAMING_SNAKE_CASE = torch.tensor([[0, 1, 2, 3, 4, 5]] )
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(snake_case__ )[0]
SCREAMING_SNAKE_CASE = torch.Size((1, 6, 7_6_8) )
self.assertEqual(output.shape , snake_case__ )
SCREAMING_SNAKE_CASE = torch.tensor(
[[[-0.4_532, -0.0_936, 0.5_137], [-0.2_676, 0.0_628, 0.6_186], [-0.3_629, -0.1_726, 0.4_716]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , snake_case__ , atol=1E-4 ) )
@slow
def UpperCamelCase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = 'the [MASK] of Belgium is Brussels'
SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained('uw-madison/nystromformer-512' )
SCREAMING_SNAKE_CASE = NystromformerForMaskedLM.from_pretrained('uw-madison/nystromformer-512' )
SCREAMING_SNAKE_CASE = tokenizer(snake_case__ , return_tensors='pt' )
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(encoding.input_ids ).logits
SCREAMING_SNAKE_CASE = token_logits[:, 2, :].argmax(-1 )[0]
self.assertEqual(tokenizer.decode(snake_case__ ) , 'capital' )
| 673 | 1 |
from collections import defaultdict
def dfs(start: int) -> int:
    '''simple docstring'''
    ret = 1
    visited[start] = True
    for v in tree[start]:
        if v not in visited:
            ret += dfs(v)
    if ret % 2 == 0:
        cuts.append(start)
    return ret


def even_tree() -> None:
    '''simple docstring'''
    dfs(1)
if __name__ == "__main__":
    n, m = 10, 9
    tree = defaultdict(list)
    visited: dict[int, bool] = {}
    cuts: list[int] = []
    count = 0
    edges = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
for u, v in edges:
tree[u].append(v)
tree[v].append(u)
even_tree()
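    # every node whose subtree has an even number of vertices ends up in `cuts`;
    # the root (node 1) always qualifies, so one entry is discounted below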
print(len(cuts) - 1)
| 673 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
ImageTextPipelineOutput,
UniDiffuserPipeline,
)
else:
from .modeling_text_decoder import UniDiffuserTextDecoder
    from .modeling_uvit import UniDiffuserModel, UTransformer2DModel
from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
| 673 | 1 |
def solution(limit: int = 1_00_00_00) -> int:
    '''simple docstring'''
    primes = set(range(3, limit, 2))
    primes.add(2)
    for p in range(3, limit, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, limit, p)))
    phi = [float(n) for n in range(limit + 1)]
    for p in primes:
        for n in range(p, limit + 1, p):
            phi[n] *= 1 - 1 / p
    return int(sum(phi[2:]))
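# Small sanity check: solution(8) == 21, the number of reduced proper fractions
# n/d with d <= 8 (this is Project Euler problem 72).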
if __name__ == "__main__":
print(F"""{solution() = }""")
| 673 |
import heapq
import sys
import numpy as np
TPos = tuple[int, int]
class PriorityQueue:
    def __init__(self):
        """simple docstring"""
        self.elements = []
        self.set = set()

    def minkey(self):
        """simple docstring"""
        if not self.empty():
            return self.elements[0][0]
        else:
            return float('inf')

    def empty(self):
        """simple docstring"""
        return len(self.elements) == 0

    def put(self, item, priority):
        """simple docstring"""
        if item not in self.set:
            heapq.heappush(self.elements, (priority, item))
            self.set.add(item)
        else:
            # update
            # print("update", item)
            temp = []
            (pri, x) = heapq.heappop(self.elements)
            while x != item:
                temp.append((pri, x))
                (pri, x) = heapq.heappop(self.elements)
            temp.append((priority, item))
            for pro, xxx in temp:
                heapq.heappush(self.elements, (pro, xxx))

    def remove_element(self, item):
        """simple docstring"""
        if item in self.set:
            self.set.remove(item)
            temp = []
            (pro, x) = heapq.heappop(self.elements)
            while x != item:
                temp.append((pro, x))
                (pro, x) = heapq.heappop(self.elements)
            for prito, yyy in temp:
                heapq.heappush(self.elements, (prito, yyy))

    def top_show(self):
        """simple docstring"""
        return self.elements[0][1]

    def get(self):
        """simple docstring"""
        (priority, item) = heapq.heappop(self.elements)
        self.set.remove(item)
        return (priority, item)
def consistent_heuristic(p: TPos, goal: TPos) -> float:
    '''simple docstring'''
    # euclidean distance
    a = np.array(p)
    b = np.array(goal)
    return np.linalg.norm(a - b)


def heuristic_2(p: TPos, goal: TPos) -> float:
    '''simple docstring'''
    # integer division by the time variable (inconsistent on purpose)
    return consistent_heuristic(p, goal) // t


def heuristic_1(p: TPos, goal: TPos) -> float:
    '''simple docstring'''
    # manhattan distance
    return abs(p[0] - goal[0]) + abs(p[1] - goal[1])


def key(start: TPos, i: int, goal: TPos, g_function: dict[TPos, float]) -> float:
    '''simple docstring'''
    ans = g_function[start] + Wa * heuristics[i](start, goal)
    return ans
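# key() is the standard weighted-A* priority: f_i(s) = g(s) + Wa * h_i(s, goal),
# where g is the path cost accumulated so far and h_i is the i-th heuristic.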
def do_something(back_pointer: dict, goal: TPos, start: TPos) -> None:
    '''simple docstring'''
    grid = np.chararray((n, n))
    for i in range(n):
        for j in range(n):
            grid[i][j] = '*'

    for i in range(n):
        for j in range(n):
            if (j, (n - 1) - i) in blocks:
                grid[i][j] = '#'

    grid[0][(n - 1)] = '-'
    x = back_pointer[goal]
    while x != start:
        (x_c, y_c) = x
        # print(x)
        grid[(n - 1) - y_c][x_c] = '-'
        x = back_pointer[x]
    grid[(n - 1)][0] = '-'

    for i in range(n):
        for j in range(n):
            if (i, j) == (0, n - 1):
                print(grid[i][j], end=' ')
                print('<-- End position', end=' ')
            else:
                print(grid[i][j], end=' ')
        print()
    print('^')
    print('Start position')
    print()
    print('# is an obstacle')
    print('- is the path taken by algorithm')
    print('PATH TAKEN BY THE ALGORITHM IS:-')
    x = back_pointer[goal]
    while x != start:
        print(x, end=' ')
        x = back_pointer[x]
    print(x)
    sys.exit()
def valid(p: TPos) -> bool:
    '''simple docstring'''
    if p[0] < 0 or p[0] > n - 1:
        return False
    if p[1] < 0 or p[1] > n - 1:
        return False
    return True
def expand_state(s, j, visited, g_function, close_list_anchor, close_list_inad, open_list, back_pointer, ) -> None:
    '''simple docstring'''
    for itera in range(n_heuristic):
        open_list[itera].remove_element(s)
    # print("s", s)
    # print("j", j)
    (x, y) = s
    left = (x - 1, y)
    right = (x + 1, y)
    up = (x, y + 1)
    down = (x, y - 1)
    for neighbours in [left, right, up, down]:
        if neighbours not in blocks:
            if valid(neighbours) and neighbours not in visited:
                # print("neighbour", neighbours)
                visited.add(neighbours)
                back_pointer[neighbours] = -1
                g_function[neighbours] = float('inf')
            if valid(neighbours) and g_function[neighbours] > g_function[s] + 1:
                g_function[neighbours] = g_function[s] + 1
                back_pointer[neighbours] = s
                if neighbours not in close_list_anchor:
                    open_list[0].put(neighbours, key(neighbours, 0, goal, g_function))
                if neighbours not in close_list_inad:
                    for var in range(1, n_heuristic):
                        if key(neighbours, var, goal, g_function) <= Wa * key(
                            neighbours, 0, goal, g_function):
                            open_list[j].put(
                                neighbours, key(neighbours, var, goal, g_function))
def make_common_ground() -> list:
    '''simple docstring'''
    some_list = []
for x in range(1 , 5 ):
for y in range(1 , 6 ):
some_list.append((x, y) )
for x in range(15 , 20 ):
some_list.append((x, 17) )
for x in range(10 , 19 ):
for y in range(1 , 15 ):
some_list.append((x, y) )
# L block
for x in range(1 , 4 ):
for y in range(12 , 19 ):
some_list.append((x, y) )
for x in range(3 , 13 ):
for y in range(16 , 19 ):
some_list.append((x, y) )
return some_list
heuristics = {0: consistent_heuristic, 1: heuristic_1, 2: heuristic_2}
blocks_blk = [
(0, 1),
(1, 1),
(2, 1),
(3, 1),
(4, 1),
(5, 1),
(6, 1),
(7, 1),
(8, 1),
(9, 1),
(10, 1),
(11, 1),
(12, 1),
(13, 1),
(14, 1),
(15, 1),
(16, 1),
(17, 1),
(18, 1),
(19, 1),
]
blocks_all = make_common_ground()
blocks = blocks_blk
# hyper parameters
Wa = 1
n = 20
n_heuristic = 3  # one consistent and two other inconsistent
# start and end destination
start = (0, 0)
goal = (n - 1, n - 1)
t = 1
def multi_a_star(start: TPos, goal: TPos, n_heuristic: int) -> None:
    '''simple docstring'''
    g_function = {start: 0, goal: float('inf')}
    back_pointer = {start: -1, goal: -1}
    open_list = []
    visited = set()
    for i in range(n_heuristic):
        open_list.append(PriorityQueue())
        open_list[i].put(start, key(start, i, goal, g_function))
    close_list_anchor = []
    close_list_inad = []
    while open_list[0].minkey() < float('inf'):
        for i in range(1, n_heuristic):
            # print(open_list[0].minkey(), open_list[i].minkey())
            if open_list[i].minkey() <= Wa * open_list[0].minkey():
                global t
                t += 1
                if g_function[goal] <= open_list[i].minkey():
                    if g_function[goal] < float('inf'):
                        do_something(back_pointer, goal, start)
                else:
                    get_s = open_list[i].top_show()
                    visited.add(get_s)
                    expand_state(
                        get_s, i, visited, g_function, close_list_anchor, close_list_inad, open_list, back_pointer, )
                    close_list_inad.append(get_s)
            else:
                if g_function[goal] <= open_list[0].minkey():
                    if g_function[goal] < float('inf'):
                        do_something(back_pointer, goal, start)
                    else:
                        get_s = open_list[0].top_show()
                        visited.add(get_s)
                        expand_state(
                            get_s, 0, visited, g_function, close_list_anchor, close_list_inad, open_list, back_pointer, )
                        close_list_anchor.append(get_s)
print('No path found to goal' )
print()
for i in range(n - 1 , -1 , -1 ):
for j in range(_UpperCamelCase ):
if (j, i) in blocks:
print('#' , end=' ' )
elif (j, i) in back_pointer:
if (j, i) == (n - 1, n - 1):
print('*' , end=' ' )
else:
print('-' , end=' ' )
else:
print('*' , end=' ' )
if (j, i) == (n - 1, n - 1):
print('<-- End position' , end=' ' )
print()
print('^' )
print('Start position' )
print()
print('# is an obstacle' )
print('- is the path taken by algorithm' )
if __name__ == "__main__":
multi_a_star(start, goal, n_heuristic)
| 673 | 1 |
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
parser.add_argument(
"--txt2img_unclip",
default="kakaobrain/karlo-v1-alpha",
type=str,
required=False,
help="The pretrained txt2img unclip.",
)
    args = parser.parse_args()

    txt2img = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)

    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained("openai/clip-vit-large-patch14")

    img2img = UnCLIPImageVariationPipeline(
        decoder=txt2img.decoder,
        text_encoder=txt2img.text_encoder,
        tokenizer=txt2img.tokenizer,
        text_proj=txt2img.text_proj,
        feature_extractor=feature_extractor,
        image_encoder=image_encoder,
        super_res_first=txt2img.super_res_first,
        super_res_last=txt2img.super_res_last,
        decoder_scheduler=txt2img.decoder_scheduler,
        super_res_scheduler=txt2img.super_res_scheduler,
    )

    img2img.save_pretrained(args.dump_path)
| 673 |
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
a_ : Tuple = logging.get_logger(__name__) # pylint: disable=invalid-name
a_ : str = "\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline\n >>> from diffusers.utils import load_image\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16\n ... )\n >>> pipe_prior.to(\"cuda\")\n\n >>> prompt = \"A red cartoon frog, 4k\"\n >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)\n\n >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(\n ... \"kandinsky-community/kandinsky-2-2-decoder\", torch_dtype=torch.float16\n ... )\n >>> pipe.to(\"cuda\")\n\n >>> init_image = load_image(\n ... \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"\n ... \"/kandinsky/frog.png\"\n ... )\n\n >>> image = pipe(\n ... image=init_image,\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... strength=0.2,\n ... ).images\n\n >>> image[0].save(\"red_frog.png\")\n ```\n"
def downscale_height_and_width(height: int, width: int, scale_factor: int = 8) -> tuple:
    '''simple docstring'''
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
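# e.g. with the default scale_factor of 8, a 512x512 request maps to a 64x64
# latent grid, and any size not divisible by 64 is first rounded up to the next multiple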
def prepare_image(pil_image, w=512, h=512) -> torch.Tensor:
    '''simple docstring'''
    pil_image = pil_image.resize((w, h), resample=Image.BICUBIC, reducing_gap=1)
    arr = np.array(pil_image.convert('RGB'))
    arr = arr.astype(np.float32) / 127.5 - 1
    arr = np.transpose(arr, [2, 0, 1])
    image = torch.from_numpy(arr).unsqueeze(0)
    return image
class KandinskyV22Img2ImgPipeline(DiffusionPipeline):
    def __init__(self, unet: UNet2DConditionModel, scheduler: DDPMScheduler, movq: VQModel, ):
        """simple docstring"""
        super().__init__()
        self.register_modules(
            unet=unet, scheduler=scheduler, movq=movq, )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def get_timesteps(self, num_inference_steps, strength, device):
        """simple docstring"""
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]
        return timesteps, num_inference_steps - t_start
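    # e.g. strength=0.3 with num_inference_steps=100 keeps only the last 30
    # scheduler timesteps, so the input image is partially re-noised rather than redrawn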
def UpperCamelCase ( self : List[str] , snake_case__ : Dict , snake_case__ : int , snake_case__ : Optional[int] , snake_case__ : Optional[Any] , snake_case__ : Union[str, Any] , snake_case__ : Optional[int] , snake_case__ : str=None ):
"""simple docstring"""
if not isinstance(snake_case__ , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
F"""`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(snake_case__ )}""" )
SCREAMING_SNAKE_CASE = image.to(device=snake_case__ , dtype=snake_case__ )
SCREAMING_SNAKE_CASE = batch_size * num_images_per_prompt
if image.shape[1] == 4:
SCREAMING_SNAKE_CASE = image
else:
if isinstance(snake_case__ , snake_case__ ) and len(snake_case__ ) != batch_size:
raise ValueError(
F"""You have passed a list of generators of length {len(snake_case__ )}, but requested an effective batch"""
F""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
elif isinstance(snake_case__ , snake_case__ ):
SCREAMING_SNAKE_CASE = [
self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(snake_case__ )
]
SCREAMING_SNAKE_CASE = torch.cat(snake_case__ , dim=0 )
else:
SCREAMING_SNAKE_CASE = self.movq.encode(snake_case__ ).latent_dist.sample(snake_case__ )
SCREAMING_SNAKE_CASE = self.movq.config.scaling_factor * init_latents
SCREAMING_SNAKE_CASE = torch.cat([init_latents] , dim=0 )
SCREAMING_SNAKE_CASE = init_latents.shape
SCREAMING_SNAKE_CASE = randn_tensor(snake_case__ , generator=snake_case__ , device=snake_case__ , dtype=snake_case__ )
# get latents
SCREAMING_SNAKE_CASE = self.scheduler.add_noise(snake_case__ , snake_case__ , snake_case__ )
SCREAMING_SNAKE_CASE = init_latents
return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        """simple docstring"""
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError('Please install accelerate via `pip install accelerate`')
        device = torch.device(f"""cuda:{gpu_id}""")
        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
    def enable_model_cpu_offload(self, gpu_id=0):
        """simple docstring"""
        if is_accelerate_available() and is_accelerate_version('>=', '0.17.0.dev0'):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.')
        device = torch.device(f"""cuda:{gpu_id}""")
        if self.device.type != "cpu":
            self.to('cpu', silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)
        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)
        # We'll offload the last model manually.
        self.final_offload_hook = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
"""simple docstring"""
if not hasattr(self.unet , '_hf_hook' ):
return self.device
for module in self.unet.modules():
if (
hasattr(snake_case__ , '_hf_hook' )
and hasattr(module._hf_hook , 'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(snake_case__ )
def __call__( self : str , snake_case__ : Union[torch.FloatTensor, List[torch.FloatTensor]] , snake_case__ : Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]] , snake_case__ : Union[torch.FloatTensor, List[torch.FloatTensor]] , snake_case__ : int = 5_1_2 , snake_case__ : int = 5_1_2 , snake_case__ : int = 1_0_0 , snake_case__ : float = 4.0 , snake_case__ : float = 0.3 , snake_case__ : int = 1 , snake_case__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , snake_case__ : Optional[str] = "pil" , snake_case__ : bool = True , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self._execution_device
SCREAMING_SNAKE_CASE = guidance_scale > 1.0
if isinstance(snake_case__ , snake_case__ ):
SCREAMING_SNAKE_CASE = torch.cat(snake_case__ , dim=0 )
SCREAMING_SNAKE_CASE = image_embeds.shape[0]
if isinstance(snake_case__ , snake_case__ ):
SCREAMING_SNAKE_CASE = torch.cat(snake_case__ , dim=0 )
if do_classifier_free_guidance:
SCREAMING_SNAKE_CASE = image_embeds.repeat_interleave(snake_case__ , dim=0 )
SCREAMING_SNAKE_CASE = negative_image_embeds.repeat_interleave(snake_case__ , dim=0 )
SCREAMING_SNAKE_CASE = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=snake_case__ )
if not isinstance(snake_case__ , snake_case__ ):
SCREAMING_SNAKE_CASE = [image]
if not all(isinstance(snake_case__ , (PIL.Image.Image, torch.Tensor) ) for i in image ):
raise ValueError(
F"""Input is in incorrect format: {[type(snake_case__ ) for i in image]}. Currently, we only support PIL image and pytorch tensor""" )
SCREAMING_SNAKE_CASE = torch.cat([prepare_image(snake_case__ , snake_case__ , snake_case__ ) for i in image] , dim=0 )
SCREAMING_SNAKE_CASE = image.to(dtype=image_embeds.dtype , device=snake_case__ )
SCREAMING_SNAKE_CASE = self.movq.encode(snake_case__ )['latents']
SCREAMING_SNAKE_CASE = latents.repeat_interleave(snake_case__ , dim=0 )
self.scheduler.set_timesteps(snake_case__ , device=snake_case__ )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.get_timesteps(snake_case__ , snake_case__ , snake_case__ )
SCREAMING_SNAKE_CASE = timesteps[:1].repeat(batch_size * num_images_per_prompt )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = downscale_height_and_width(snake_case__ , snake_case__ , self.movq_scale_factor )
SCREAMING_SNAKE_CASE = self.prepare_latents(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , image_embeds.dtype , snake_case__ , snake_case__ )
for i, t in enumerate(self.progress_bar(snake_case__ ) ):
# expand the latents if we are doing classifier free guidance
SCREAMING_SNAKE_CASE = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
SCREAMING_SNAKE_CASE = {'image_embeds': image_embeds}
SCREAMING_SNAKE_CASE = self.unet(
sample=snake_case__ , timestep=snake_case__ , encoder_hidden_states=snake_case__ , added_cond_kwargs=snake_case__ , return_dict=snake_case__ , )[0]
if do_classifier_free_guidance:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = noise_pred.split(latents.shape[1] , dim=1 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = noise_pred.chunk(2 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = variance_pred.chunk(2 )
SCREAMING_SNAKE_CASE = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
SCREAMING_SNAKE_CASE = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , 'variance_type' )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
SCREAMING_SNAKE_CASE = self.scheduler.step(
snake_case__ , snake_case__ , snake_case__ , generator=snake_case__ , )[0]
# post-processing
SCREAMING_SNAKE_CASE = self.movq.decode(snake_case__ , force_not_quantize=snake_case__ )['sample']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" )
if output_type in ["np", "pil"]:
SCREAMING_SNAKE_CASE = image * 0.5 + 0.5
SCREAMING_SNAKE_CASE = image.clamp(0 , 1 )
SCREAMING_SNAKE_CASE = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
SCREAMING_SNAKE_CASE = self.numpy_to_pil(snake_case__ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=snake_case__ )
| 673 | 1 |
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ : Dict = logging.get_logger(__name__)
# TODO Update this
a_ : List[str] = {
"facebook/esm-1b": "https://huggingface.co/facebook/esm-1b/resolve/main/config.json",
# See all ESM models at https://huggingface.co/models?filter=esm
}
class EsmConfig(PretrainedConfig):
    model_type = "esm"
def __init__( self : Optional[Any] , snake_case__ : List[str]=None , snake_case__ : Union[str, Any]=None , snake_case__ : int=None , snake_case__ : Union[str, Any]=7_6_8 , snake_case__ : Dict=1_2 , snake_case__ : str=1_2 , snake_case__ : Tuple=3_0_7_2 , snake_case__ : Optional[int]=0.1 , snake_case__ : str=0.1 , snake_case__ : List[Any]=1_0_2_6 , snake_case__ : Union[str, Any]=0.02 , snake_case__ : Tuple=1E-12 , snake_case__ : Optional[Any]="absolute" , snake_case__ : int=True , snake_case__ : List[Any]=None , snake_case__ : Optional[int]=False , snake_case__ : str=False , snake_case__ : Any=None , snake_case__ : Optional[int]=None , **snake_case__ : Optional[int] , ):
"""simple docstring"""
super().__init__(pad_token_id=snake_case__ , mask_token_id=snake_case__ , **snake_case__ )
SCREAMING_SNAKE_CASE = vocab_size
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = intermediate_size
SCREAMING_SNAKE_CASE = hidden_dropout_prob
SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE = max_position_embeddings
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = layer_norm_eps
SCREAMING_SNAKE_CASE = position_embedding_type
SCREAMING_SNAKE_CASE = use_cache
SCREAMING_SNAKE_CASE = emb_layer_norm_before
SCREAMING_SNAKE_CASE = token_dropout
SCREAMING_SNAKE_CASE = is_folding_model
if is_folding_model:
if esmfold_config is None:
logger.info('No esmfold_config supplied for folding model, using default values.' )
SCREAMING_SNAKE_CASE = EsmFoldConfig()
elif isinstance(snake_case__ , snake_case__ ):
SCREAMING_SNAKE_CASE = EsmFoldConfig(**snake_case__ )
SCREAMING_SNAKE_CASE = esmfold_config
if vocab_list is None:
logger.warning('No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!' )
SCREAMING_SNAKE_CASE = get_default_vocab_list()
else:
SCREAMING_SNAKE_CASE = vocab_list
else:
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
if self.esmfold_config is not None and getattr(self.esmfold_config , 'use_esm_attn_map' , snake_case__ ):
raise ValueError('The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!' )
    def to_dict(self):
        """simple docstring"""
        output = super().to_dict()
        if isinstance(self.esmfold_config, EsmFoldConfig):
            output['esmfold_config'] = self.esmfold_config.to_dict()
        return output
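# A minimal usage sketch (the hyper-parameter values here are illustrative, not defaults):
#   config = EsmConfig(vocab_size=33, hidden_size=320, num_hidden_layers=6, num_attention_heads=20)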
@dataclass
class EsmFoldConfig:
    esm_type: str = None
    fp16_esm: bool = True
    use_esm_attn_map: bool = False
    esm_ablate_pairwise: bool = False
    esm_ablate_sequence: bool = False
    esm_input_dropout: float = 0
    embed_aa: bool = True
    bypass_lm: bool = False
    lddt_head_hid_dim: int = 128
    trunk: "TrunkConfig" = None

    def __post_init__(self):
        """simple docstring"""
        if self.trunk is None:
            self.trunk = TrunkConfig()
        elif isinstance(self.trunk, dict):
            self.trunk = TrunkConfig(**self.trunk)

    def to_dict(self):
        """simple docstring"""
        output = asdict(self)
        output['trunk'] = self.trunk.to_dict()
        return output
@dataclass
class TrunkConfig:
    num_blocks: int = 48
    sequence_state_dim: int = 1024
    pairwise_state_dim: int = 128
    sequence_head_width: int = 32
    pairwise_head_width: int = 32
    position_bins: int = 32
    dropout: float = 0
    layer_drop: float = 0
    cpu_grad_checkpoint: bool = False
    max_recycles: int = 4
    chunk_size: Optional[int] = 128
    structure_module: "StructureModuleConfig" = None

    def __post_init__(self):
        """simple docstring"""
        if self.structure_module is None:
            self.structure_module = StructureModuleConfig()
        elif isinstance(self.structure_module, dict):
            self.structure_module = StructureModuleConfig(**self.structure_module)
if self.max_recycles <= 0:
raise ValueError(F"""`max_recycles` should be positive, got {self.max_recycles}.""" )
if self.sequence_state_dim % self.sequence_state_dim != 0:
raise ValueError(
'`sequence_state_dim` should be a round multiple of `sequence_state_dim`, got'
F""" {self.sequence_state_dim} and {self.sequence_state_dim}.""" )
if self.pairwise_state_dim % self.pairwise_state_dim != 0:
raise ValueError(
'`pairwise_state_dim` should be a round multiple of `pairwise_state_dim`, got'
F""" {self.pairwise_state_dim} and {self.pairwise_state_dim}.""" )
        sequence_num_heads = self.sequence_state_dim // self.sequence_head_width
        pairwise_num_heads = self.pairwise_state_dim // self.pairwise_head_width
if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
raise ValueError(
'`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got'
F""" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.""" )
if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
raise ValueError(
'`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got'
F""" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.""" )
if self.pairwise_state_dim % 2 != 0:
raise ValueError(F"""`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.""" )
if self.dropout >= 0.4:
raise ValueError(F"""`dropout` should not be greater than 0.4, got {self.dropout}.""" )
    def to_dict(self):
        """simple docstring"""
        output = asdict(self)
        output['structure_module'] = self.structure_module.to_dict()
        return output
@dataclass
class StructureModuleConfig:
    sequence_dim: int = 384
    pairwise_dim: int = 128
    ipa_dim: int = 16
    resnet_dim: int = 128
    num_heads_ipa: int = 12
    num_qk_points: int = 4
    num_v_points: int = 8
    dropout_rate: float = 0.1
    num_blocks: int = 8
    num_transition_layers: int = 1
    num_resnet_blocks: int = 2
    num_angles: int = 7
    trans_scale_factor: int = 10
    epsilon: float = 1e-8
    inf: float = 1e5

    def to_dict(self):
        """simple docstring"""
        return asdict(self)
def get_default_vocab_list() -> tuple:
'''simple docstring'''
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
| 673 |
import argparse
import numpy as np
import torch
from transformers import SpeechT5HifiGan, SpeechT5HifiGanConfig, logging
logging.set_verbosity_info()
a_ : List[Any] = logging.get_logger("transformers.models.speecht5")
def load_weights(checkpoint, hf_model, config) -> None:
    '''simple docstring'''
    hf_model.apply_weight_norm()
    hf_model.conv_pre.weight_g.data = checkpoint['input_conv.weight_g']
    hf_model.conv_pre.weight_v.data = checkpoint['input_conv.weight_v']
    hf_model.conv_pre.bias.data = checkpoint['input_conv.bias']
    for i in range(len(config.upsample_rates)):
        hf_model.upsampler[i].weight_g.data = checkpoint[f"""upsamples.{i}.1.weight_g"""]
        hf_model.upsampler[i].weight_v.data = checkpoint[f"""upsamples.{i}.1.weight_v"""]
        hf_model.upsampler[i].bias.data = checkpoint[f"""upsamples.{i}.1.bias"""]
    for i in range(len(config.upsample_rates) * len(config.resblock_kernel_sizes)):
        for j in range(len(config.resblock_dilation_sizes)):
            hf_model.resblocks[i].convs1[j].weight_g.data = checkpoint[f"""blocks.{i}.convs1.{j}.1.weight_g"""]
            hf_model.resblocks[i].convs1[j].weight_v.data = checkpoint[f"""blocks.{i}.convs1.{j}.1.weight_v"""]
            hf_model.resblocks[i].convs1[j].bias.data = checkpoint[f"""blocks.{i}.convs1.{j}.1.bias"""]
            hf_model.resblocks[i].convs2[j].weight_g.data = checkpoint[f"""blocks.{i}.convs2.{j}.1.weight_g"""]
            hf_model.resblocks[i].convs2[j].weight_v.data = checkpoint[f"""blocks.{i}.convs2.{j}.1.weight_v"""]
            hf_model.resblocks[i].convs2[j].bias.data = checkpoint[f"""blocks.{i}.convs2.{j}.1.bias"""]
    hf_model.conv_post.weight_g.data = checkpoint['output_conv.1.weight_g']
    hf_model.conv_post.weight_v.data = checkpoint['output_conv.1.weight_v']
    hf_model.conv_post.bias.data = checkpoint['output_conv.1.bias']
    hf_model.remove_weight_norm()
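# apply_weight_norm() re-registers the weight_g/weight_v parametrization so the
# fairseq-style tensors can be copied in above; remove_weight_norm() then folds
# them back into plain weights for inference.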
@torch.no_grad()
def convert_hifigan_checkpoint(checkpoint_path, stats_path, pytorch_dump_folder_path, config_path=None, repo_id=None, ) -> None:
    '''simple docstring'''
    if config_path is not None:
        config = SpeechT5HifiGanConfig.from_pretrained(config_path)
    else:
        config = SpeechT5HifiGanConfig()
    model = SpeechT5HifiGan(config)
    orig_checkpoint = torch.load(checkpoint_path)
    load_weights(orig_checkpoint['model']['generator'], model, config)
    stats = np.load(stats_path)
    mean = stats[0].reshape(-1)
    scale = stats[1].reshape(-1)
    model.mean = torch.from_numpy(mean).float()
    model.scale = torch.from_numpy(scale).float()
    model.save_pretrained(pytorch_dump_folder_path)
    if repo_id:
        print('Pushing to the hub...')
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
parser.add_argument("--stats_path", required=True, default=None, type=str, help="Path to stats.npy file")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
    args = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
| 673 | 1 |
import math
class Graph :
    def __init__( self , n=0 ):  # a graph with Node 0,1,...,N-1
        """simple docstring"""
        self.n = n
        self.w = [
            [math.inf for j in range(0 , n )] for i in range(0 , n )
        ]  # adjacency matrix for weight
        self.dp = [
            [math.inf for j in range(0 , n )] for i in range(0 , n )
        ]  # dp[i][j] stores minimum distance from i to j
    def add_edge( self , u , v , w ):
        """simple docstring"""
        self.dp[u][v] = w
    def floyd_warshall( self ):
        """simple docstring"""
        for k in range(0 , self.n ):
            for i in range(0 , self.n ):
                for j in range(0 , self.n ):
                    self.dp[i][j] = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j] )
    def show_min( self , u , v ):
        """simple docstring"""
        return self.dp[u][v]
if __name__ == "__main__":
    graph = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 10)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 10)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
graph.show_min(1, 4)
graph.show_min(0, 3)
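    # Editor's note (illustrative): `show_min` returns the distance rather than printing
    # it, so wrap the calls above in `print(...)` to see output:
    # print(graph.show_min(1, 4))  # 11, via 1 -> 3 -> 4 (5 + 6)
    # print(graph.show_min(0, 3))  # 16, via 0 -> 2 -> 3 (9 + 7)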
| 673 |
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
},
"merges_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
},
"tokenizer_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"allenai/led-base-16384": 1_6384,
}
class UpperCamelCase ( PreTrainedTokenizerFast ):
__UpperCamelCase =VOCAB_FILES_NAMES
__UpperCamelCase =PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase =LEDTokenizer
__UpperCamelCase =["input_ids", "attention_mask"]
def __init__( self : Tuple , snake_case__ : List[Any]=None , snake_case__ : List[str]=None , snake_case__ : List[str]=None , snake_case__ : Dict="replace" , snake_case__ : Tuple="<s>" , snake_case__ : Optional[Any]="</s>" , snake_case__ : int="</s>" , snake_case__ : Dict="<s>" , snake_case__ : Union[str, Any]="<unk>" , snake_case__ : Optional[int]="<pad>" , snake_case__ : List[str]="<mask>" , snake_case__ : List[Any]=False , snake_case__ : int=True , **snake_case__ : Dict , ):
"""simple docstring"""
super().__init__(
snake_case__ , snake_case__ , tokenizer_file=snake_case__ , errors=snake_case__ , bos_token=snake_case__ , eos_token=snake_case__ , sep_token=snake_case__ , cls_token=snake_case__ , unk_token=snake_case__ , pad_token=snake_case__ , mask_token=snake_case__ , add_prefix_space=snake_case__ , trim_offsets=snake_case__ , **snake_case__ , )
SCREAMING_SNAKE_CASE = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space' , snake_case__ ) != add_prefix_space:
SCREAMING_SNAKE_CASE = getattr(snake_case__ , pre_tok_state.pop('type' ) )
SCREAMING_SNAKE_CASE = add_prefix_space
SCREAMING_SNAKE_CASE = pre_tok_class(**snake_case__ )
SCREAMING_SNAKE_CASE = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
SCREAMING_SNAKE_CASE = 'post_processor'
SCREAMING_SNAKE_CASE = getattr(self.backend_tokenizer , snake_case__ , snake_case__ )
if tokenizer_component_instance:
SCREAMING_SNAKE_CASE = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
SCREAMING_SNAKE_CASE = tuple(state['sep'] )
if "cls" in state:
SCREAMING_SNAKE_CASE = tuple(state['cls'] )
SCREAMING_SNAKE_CASE = False
if state.get('add_prefix_space' , snake_case__ ) != add_prefix_space:
SCREAMING_SNAKE_CASE = add_prefix_space
SCREAMING_SNAKE_CASE = True
if state.get('trim_offsets' , snake_case__ ) != trim_offsets:
SCREAMING_SNAKE_CASE = trim_offsets
SCREAMING_SNAKE_CASE = True
if changes_to_apply:
SCREAMING_SNAKE_CASE = getattr(snake_case__ , state.pop('type' ) )
SCREAMING_SNAKE_CASE = component_class(**snake_case__ )
setattr(self.backend_tokenizer , snake_case__ , snake_case__ )
@property
# Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
if self._mask_token is None:
if self.verbose:
logger.error('Using mask_token, but it is not set yet.' )
return None
return str(self._mask_token )
@mask_token.setter
def UpperCamelCase ( self : List[Any] , snake_case__ : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else value
SCREAMING_SNAKE_CASE = value
def UpperCamelCase ( self : Dict , *snake_case__ : Optional[Any] , **snake_case__ : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = kwargs.get('is_split_into_words' , snake_case__ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
'to use it with pretokenized inputs.' )
return super()._batch_encode_plus(*snake_case__ , **snake_case__ )
def UpperCamelCase ( self : List[str] , *snake_case__ : List[Any] , **snake_case__ : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = kwargs.get('is_split_into_words' , snake_case__ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
'to use it with pretokenized inputs.' )
return super()._encode_plus(*snake_case__ , **snake_case__ )
def UpperCamelCase ( self : Union[str, Any] , snake_case__ : str , snake_case__ : Optional[str] = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self._tokenizer.model.save(snake_case__ , name=snake_case__ )
return tuple(snake_case__ )
def UpperCamelCase ( self : List[str] , snake_case__ : int , snake_case__ : Tuple=None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def UpperCamelCase ( self : Optional[int] , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = [self.sep_token_id]
SCREAMING_SNAKE_CASE = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def UpperCamelCase ( self : Optional[Any] , snake_case__ : Union[Dict[str, EncodedInput], BatchEncoding] , snake_case__ : Optional[int] = None , snake_case__ : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , snake_case__ : Optional[int] = None , snake_case__ : Optional[bool] = None , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = super()._pad(
encoded_inputs=snake_case__ , max_length=snake_case__ , padding_strategy=snake_case__ , pad_to_multiple_of=snake_case__ , return_attention_mask=snake_case__ , )
# Load from model defaults
if return_attention_mask is None:
SCREAMING_SNAKE_CASE = 'attention_mask' in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
SCREAMING_SNAKE_CASE = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
SCREAMING_SNAKE_CASE = len(encoded_inputs['global_attention_mask'] ) != len(snake_case__ )
if needs_to_be_padded:
SCREAMING_SNAKE_CASE = len(snake_case__ ) - len(encoded_inputs['global_attention_mask'] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
SCREAMING_SNAKE_CASE = (
encoded_inputs['global_attention_mask'] + [-1] * difference
)
elif self.padding_side == "left":
SCREAMING_SNAKE_CASE = [-1] * difference + encoded_inputs[
'global_attention_mask'
]
else:
raise ValueError('Invalid padding strategy:' + str(self.padding_side ) )
return encoded_inputs
| 673 | 1 |
import argparse
import os.path as osp
import re
import torch
from safetensors.torch import load_file, save_file
# =================#
# UNet Conversion #
# =================#
unet_conversion_map = [
# (stable-diffusion, HF Diffusers)
("time_embed.0.weight", "time_embedding.linear_1.weight"),
("time_embed.0.bias", "time_embedding.linear_1.bias"),
("time_embed.2.weight", "time_embedding.linear_2.weight"),
("time_embed.2.bias", "time_embedding.linear_2.bias"),
("input_blocks.0.0.weight", "conv_in.weight"),
("input_blocks.0.0.bias", "conv_in.bias"),
("out.0.weight", "conv_norm_out.weight"),
("out.0.bias", "conv_norm_out.bias"),
("out.2.weight", "conv_out.weight"),
("out.2.bias", "conv_out.bias"),
]
unet_conversion_map_resnet = [
# (stable-diffusion, HF Diffusers)
("in_layers.0", "norm1"),
("in_layers.2", "conv1"),
("out_layers.0", "norm2"),
("out_layers.3", "conv2"),
("emb_layers.1", "time_emb_proj"),
("skip_connection", "conv_shortcut"),
]
unet_conversion_map_layer = []
# hardcoded number of downblocks and resnets/attentions...
# would need smarter logic for other networks.
for i in range(4):
    # loop over downblocks/upblocks
    for j in range(2):
        # loop over resnets/attentions for downblocks
        hf_down_res_prefix = F"""down_blocks.{i}.resnets.{j}."""
        sd_down_res_prefix = F"""input_blocks.{3*i + j + 1}.0."""
        unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))
        if i < 3:
            # no attention layers in down_blocks.3
            hf_down_atn_prefix = F"""down_blocks.{i}.attentions.{j}."""
            sd_down_atn_prefix = F"""input_blocks.{3*i + j + 1}.1."""
            unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))
    for j in range(3):
        # loop over resnets/attentions for upblocks
        hf_up_res_prefix = F"""up_blocks.{i}.resnets.{j}."""
        sd_up_res_prefix = F"""output_blocks.{3*i + j}.0."""
        unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))
        if i > 0:
            # no attention layers in up_blocks.0
            hf_up_atn_prefix = F"""up_blocks.{i}.attentions.{j}."""
            sd_up_atn_prefix = F"""output_blocks.{3*i + j}.1."""
            unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))
    if i < 3:
        # no downsample in down_blocks.3
        hf_downsample_prefix = F"""down_blocks.{i}.downsamplers.0.conv."""
        sd_downsample_prefix = F"""input_blocks.{3*(i+1)}.0.op."""
        unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))
        # no upsample in up_blocks.3
        hf_upsample_prefix = F"""up_blocks.{i}.upsamplers.0."""
        sd_upsample_prefix = F"""output_blocks.{3*i + 2}.{1 if i == 0 else 2}."""
        unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))
hf_mid_atn_prefix = "mid_block.attentions.0."
sd_mid_atn_prefix = "middle_block.1."
unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))
for j in range(2):
    hf_mid_res_prefix = F"""mid_block.resnets.{j}."""
    sd_mid_res_prefix = F"""middle_block.{2*j}."""
    unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))
def convert_unet_state_dict( unet_state_dict ):
    '''simple docstring'''
    mapping = {k: k for k in unet_state_dict.keys()}
    for sd_name, hf_name in unet_conversion_map:
        mapping[hf_name] = sd_name
    for k, v in mapping.items():
        if "resnets" in k:
            for sd_part, hf_part in unet_conversion_map_resnet:
                v = v.replace(hf_part , sd_part )
            mapping[k] = v
    for k, v in mapping.items():
        for sd_part, hf_part in unet_conversion_map_layer:
            v = v.replace(hf_part , sd_part )
        mapping[k] = v
    new_state_dict = {v: unet_state_dict[k] for k, v in mapping.items()}
    return new_state_dict
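# Worked example of the renaming above (editor's addition; key chosen for illustration):
# the diffusers key "down_blocks.0.resnets.0.norm1.weight" becomes the SD key
# "input_blocks.1.0.in_layers.0.weight" via unet_conversion_map_resnet + _layer.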
# ================#
# VAE Conversion #
# ================#
vae_conversion_map = [
# (stable-diffusion, HF Diffusers)
("nin_shortcut", "conv_shortcut"),
("norm_out", "conv_norm_out"),
("mid.attn_1.", "mid_block.attentions.0."),
]
for i in range(4):
    # down_blocks have two resnets
    for j in range(2):
        hf_down_prefix = F"""encoder.down_blocks.{i}.resnets.{j}."""
        sd_down_prefix = F"""encoder.down.{i}.block.{j}."""
        vae_conversion_map.append((sd_down_prefix, hf_down_prefix))
    if i < 3:
        hf_downsample_prefix = F"""down_blocks.{i}.downsamplers.0."""
        sd_downsample_prefix = F"""down.{i}.downsample."""
        vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix))
        hf_upsample_prefix = F"""up_blocks.{i}.upsamplers.0."""
        sd_upsample_prefix = F"""up.{3-i}.upsample."""
        vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix))
    # up_blocks have three resnets
    # also, up blocks in hf are numbered in reverse from sd
    for j in range(3):
        hf_up_prefix = F"""decoder.up_blocks.{i}.resnets.{j}."""
        sd_up_prefix = F"""decoder.up.{3-i}.block.{j}."""
        vae_conversion_map.append((sd_up_prefix, hf_up_prefix))
# this part accounts for mid blocks in both the encoder and the decoder
for i in range(2):
    hf_mid_res_prefix = F"""mid_block.resnets.{i}."""
    sd_mid_res_prefix = F"""mid.block_{i+1}."""
    vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix))
vae_conversion_map_attn = [
# (stable-diffusion, HF Diffusers)
("norm.", "group_norm."),
("q.", "query."),
("k.", "key."),
("v.", "value."),
("proj_out.", "proj_attn."),
]
def reshape_weight_for_sd( w ):
    '''simple docstring'''
    return w.reshape(*w.shape , 1 , 1 )
def convert_vae_state_dict( vae_state_dict ):
    '''simple docstring'''
    mapping = {k: k for k in vae_state_dict.keys()}
    for k, v in mapping.items():
        for sd_part, hf_part in vae_conversion_map:
            v = v.replace(hf_part , sd_part )
        mapping[k] = v
    for k, v in mapping.items():
        if "attentions" in k:
            for sd_part, hf_part in vae_conversion_map_attn:
                v = v.replace(hf_part , sd_part )
            mapping[k] = v
    new_state_dict = {v: vae_state_dict[k] for k, v in mapping.items()}
    weights_to_convert = ['q', 'k', 'v', 'proj_out']
    for k, v in new_state_dict.items():
        for weight_name in weights_to_convert:
            if f"""mid.attn_1.{weight_name}.weight""" in k:
                print(f"""Reshaping {k} for SD format""" )
                new_state_dict[k] = reshape_weight_for_sd(v )
    return new_state_dict
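# Editor's note (illustrative): `reshape_weight_for_sd` turns the (C, C) linear
# attention weights used by diffusers into (C, C, 1, 1) tensors, matching the 1x1
# convolutions the original SD VAE checkpoint expects for q/k/v/proj_out.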
# =========================#
# Text Encoder Conversion #
# =========================#
textenc_conversion_lst = [
# (stable-diffusion, HF Diffusers)
("resblocks.", "text_model.encoder.layers."),
("ln_1", "layer_norm1"),
("ln_2", "layer_norm2"),
(".c_fc.", ".fc1."),
(".c_proj.", ".fc2."),
(".attn", ".self_attn"),
("ln_final.", "transformer.text_model.final_layer_norm."),
("token_embedding.weight", "transformer.text_model.embeddings.token_embedding.weight"),
("positional_embedding", "transformer.text_model.embeddings.position_embedding.weight"),
]
protected = {re.escape(x[1]): x[0] for x in textenc_conversion_lst}
textenc_pattern = re.compile("|".join(protected.keys()))
# Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp
code2idx = {"q": 0, "k": 1, "v": 2}
def convert_text_enc_state_dict_vaa( text_enc_dict ):
    '''simple docstring'''
    new_state_dict = {}
    capture_qkv_weight = {}
    capture_qkv_bias = {}
    for k, v in text_enc_dict.items():
        if (
            k.endswith('.self_attn.q_proj.weight' )
            or k.endswith('.self_attn.k_proj.weight' )
            or k.endswith('.self_attn.v_proj.weight' )
        ):
            k_pre = k[: -len('.q_proj.weight' )]
            k_code = k[-len('q_proj.weight' )]
            if k_pre not in capture_qkv_weight:
                capture_qkv_weight[k_pre] = [None, None, None]
            capture_qkv_weight[k_pre][code2idx[k_code]] = v
            continue
        if (
            k.endswith('.self_attn.q_proj.bias' )
            or k.endswith('.self_attn.k_proj.bias' )
            or k.endswith('.self_attn.v_proj.bias' )
        ):
            k_pre = k[: -len('.q_proj.bias' )]
            k_code = k[-len('q_proj.bias' )]
            if k_pre not in capture_qkv_bias:
                capture_qkv_bias[k_pre] = [None, None, None]
            capture_qkv_bias[k_pre][code2idx[k_code]] = v
            continue
        relabelled_key = textenc_pattern.sub(lambda m : protected[re.escape(m.group(0 ) )] , k )
        new_state_dict[relabelled_key] = v
    for k_pre, tensors in capture_qkv_weight.items():
        if None in tensors:
            raise Exception('CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing' )
        relabelled_key = textenc_pattern.sub(lambda m : protected[re.escape(m.group(0 ) )] , k_pre )
        new_state_dict[relabelled_key + '.in_proj_weight'] = torch.cat(tensors )
    for k_pre, tensors in capture_qkv_bias.items():
        if None in tensors:
            raise Exception('CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing' )
        relabelled_key = textenc_pattern.sub(lambda m : protected[re.escape(m.group(0 ) )] , k_pre )
        new_state_dict[relabelled_key + '.in_proj_bias'] = torch.cat(tensors )
    return new_state_dict
def convert_text_enc_state_dict( text_enc_dict ):
    '''simple docstring'''
    return text_enc_dict
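# Editor's note (illustrative): the `torch.cat` calls in convert_text_enc_state_dict_vaa
# above fuse the separate q/k/v projection tensors back into the single in_proj tensor
# of the OpenCLIP text encoder, in the q, k, v order fixed by `code2idx`.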
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--model_path", default=None, type=str, required=True, help="Path to the model to convert.")
parser.add_argument("--checkpoint_path", default=None, type=str, required=True, help="Path to the output model.")
parser.add_argument("--half", action="store_true", help="Save weights in half precision.")
parser.add_argument(
"--use_safetensors", action="store_true", help="Save weights use safetensors, default is ckpt."
)
    args = parser.parse_args()
assert args.model_path is not None, "Must provide a model path!"
assert args.checkpoint_path is not None, "Must provide a checkpoint path!"
# Path for safetensors
    unet_path = osp.join(args.model_path, "unet", "diffusion_pytorch_model.safetensors")
    vae_path = osp.join(args.model_path, "vae", "diffusion_pytorch_model.safetensors")
    text_enc_path = osp.join(args.model_path, "text_encoder", "model.safetensors")
    # Load models from safetensors if it exists, if it doesn't pytorch
    if osp.exists(unet_path):
        unet_state_dict = load_file(unet_path, device="cpu")
    else:
        unet_path = osp.join(args.model_path, "unet", "diffusion_pytorch_model.bin")
        unet_state_dict = torch.load(unet_path, map_location="cpu")
    if osp.exists(vae_path):
        vae_state_dict = load_file(vae_path, device="cpu")
    else:
        vae_path = osp.join(args.model_path, "vae", "diffusion_pytorch_model.bin")
        vae_state_dict = torch.load(vae_path, map_location="cpu")
    if osp.exists(text_enc_path):
        text_enc_dict = load_file(text_enc_path, device="cpu")
    else:
        text_enc_path = osp.join(args.model_path, "text_encoder", "pytorch_model.bin")
        text_enc_dict = torch.load(text_enc_path, map_location="cpu")
    # Convert the UNet model
    unet_state_dict = convert_unet_state_dict(unet_state_dict)
    unet_state_dict = {"model.diffusion_model." + k: v for k, v in unet_state_dict.items()}
    # Convert the VAE model
    vae_state_dict = convert_vae_state_dict(vae_state_dict)
    vae_state_dict = {"first_stage_model." + k: v for k, v in vae_state_dict.items()}
    # Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper
    is_vaa_model = "text_model.encoder.layers.22.layer_norm2.bias" in text_enc_dict
    if is_vaa_model:
        # Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm
        text_enc_dict = {"transformer." + k: v for k, v in text_enc_dict.items()}
        text_enc_dict = convert_text_enc_state_dict_vaa(text_enc_dict)
        text_enc_dict = {"cond_stage_model.model." + k: v for k, v in text_enc_dict.items()}
    else:
        text_enc_dict = convert_text_enc_state_dict(text_enc_dict)
        text_enc_dict = {"cond_stage_model.transformer." + k: v for k, v in text_enc_dict.items()}
    # Put together new checkpoint
    state_dict = {**unet_state_dict, **vae_state_dict, **text_enc_dict}
    if args.half:
        state_dict = {k: v.half() for k, v in state_dict.items()}
    if args.use_safetensors:
        save_file(state_dict, args.checkpoint_path)
    else:
        state_dict = {"state_dict": state_dict}
        torch.save(state_dict, args.checkpoint_path)
| 673 |
#
# This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def printflock( *_UpperCamelCase ):
    '''simple docstring'''
    with open(__file__ , 'r' ) as fh:
        fcntl.flock(fh , fcntl.LOCK_EX )
        try:
            print(*_UpperCamelCase )
        finally:
            fcntl.flock(fh , fcntl.LOCK_UN )
local_rank = int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(local_rank)
device = torch.device("cuda", local_rank)
hostname = socket.gethostname()
gpu = F"""[{hostname}-{local_rank}]"""
try:
# test distributed
dist.init_process_group("nccl")
dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
dist.barrier()
# test cuda is available and can allocate memory
torch.cuda.is_available()
torch.ones(1).cuda(local_rank)
# global rank
    rank = dist.get_rank()
    world_size = dist.get_world_size()
printflock(F"""{gpu} is OK (global rank: {rank}/{world_size})""")
dist.barrier()
if rank == 0:
printflock(F"""pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}""")
except Exception:
printflock(F"""{gpu} is broken""")
raise
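# Editor's note: the bare `raise` above re-raises the original exception, so a broken
# GPU or interconnect exits non-zero and fails the whole torch.distributed (or SLURM)
# job loudly instead of hanging silently.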
| 673 | 1 |
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
modified_files = subprocess.check_output(F"""git diff --name-only {fork_point_sha}""".split()).decode("utf-8").split()
joined_dirs = "|".join(sys.argv[1:])
regex = re.compile(RF"""^({joined_dirs}).*?\.py$""")
relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
| 673 |
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
a_ : Optional[Any] = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json"
},
"merges_file": {
"allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt"
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"allegro/herbert-base-cased": 514}
PRETRAINED_INIT_CONFIGURATION = {}
class UpperCamelCase ( PreTrainedTokenizerFast ):
__UpperCamelCase =VOCAB_FILES_NAMES
__UpperCamelCase =PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase =PRETRAINED_INIT_CONFIGURATION
__UpperCamelCase =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase =HerbertTokenizer
def __init__( self : Tuple , snake_case__ : Optional[Any]=None , snake_case__ : int=None , snake_case__ : Optional[int]=None , snake_case__ : str="<s>" , snake_case__ : Tuple="<unk>" , snake_case__ : List[str]="<pad>" , snake_case__ : Tuple="<mask>" , snake_case__ : Dict="</s>" , **snake_case__ : List[str] , ):
"""simple docstring"""
super().__init__(
snake_case__ , snake_case__ , tokenizer_file=snake_case__ , cls_token=snake_case__ , unk_token=snake_case__ , pad_token=snake_case__ , mask_token=snake_case__ , sep_token=snake_case__ , **snake_case__ , )
def UpperCamelCase ( self : Union[str, Any] , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = [self.cls_token_id]
SCREAMING_SNAKE_CASE = [self.sep_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def UpperCamelCase ( self : Optional[Any] , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None , snake_case__ : bool = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=snake_case__ , token_ids_a=snake_case__ , already_has_special_tokens=snake_case__ )
if token_ids_a is None:
return [1] + ([0] * len(snake_case__ )) + [1]
return [1] + ([0] * len(snake_case__ )) + [1] + ([0] * len(snake_case__ )) + [1]
def UpperCamelCase ( self : Optional[Any] , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = [self.sep_token_id]
SCREAMING_SNAKE_CASE = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCamelCase ( self : Tuple , snake_case__ : str , snake_case__ : Optional[str] = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self._tokenizer.model.save(snake_case__ , name=snake_case__ )
return tuple(snake_case__ )
| 673 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"configuration_poolformer": [
"POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"PoolFormerConfig",
"PoolFormerOnnxConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_poolformer"] = ["PoolFormerFeatureExtractor"]
    _import_structure["image_processing_poolformer"] = ["PoolFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_poolformer"] = [
"POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"PoolFormerForImageClassification",
"PoolFormerModel",
"PoolFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
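    # Editor's note (illustrative): replacing the entry in `sys.modules` with a
    # `_LazyModule` defers the heavy torch/vision imports until an attribute of
    # the package is first accessed.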
| 673 |
def sum_of_digits( _UpperCamelCase : int ) -> int:
    '''simple docstring'''
    n = abs(_UpperCamelCase )
    res = 0
    while n > 0:
        res += n % 10
        n //= 10
    return res
def sum_of_digits_recursion( _UpperCamelCase : int ) -> int:
    '''simple docstring'''
    n = abs(_UpperCamelCase )
    return n if n < 10 else n % 10 + sum_of_digits(n // 10 )
def sum_of_digits_compact( _UpperCamelCase : int ) -> int:
    '''simple docstring'''
    return sum(int(c ) for c in str(abs(_UpperCamelCase ) ) )
def benchmark() -> None:
    '''simple docstring'''
    from collections.abc import Callable
    from timeit import timeit
    def benchmark_a_function(func : Callable , value : int ) -> None:
        call = f"""{func.__name__}({value})"""
        timing = timeit(f"""__main__.{call}""" , setup='import __main__' )
        print(f"""{call:56} = {func(value )} -- {timing:.4f} seconds""" )
    for value in (26_21_44, 11_25_89_99_06_84_26_24, 1_26_76_50_60_02_28_22_94_01_49_67_03_20_53_76):
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func , value )
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
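    # Quick sanity check (editor's addition): the three variants agree, e.g.
    # sum_of_digits(12345) == sum_of_digits_recursion(12345) == sum_of_digits_compact(12345) == 15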
| 673 | 1 |
from cva import destroyAllWindows, imread, imshow, waitKey
def convert_to_negative( img ):
    '''simple docstring'''
    pixel_h , pixel_v = img.shape[0], img.shape[1]
    # converting each pixel's color to its negative
    for i in range(pixel_h ):
        for j in range(pixel_v ):
            img[i][j] = [2_55, 2_55, 2_55] - img[i][j]
    return img
if __name__ == "__main__":
# read original image
    img = imread("image_data/lena.jpg", 1)
    # convert to its negative
    img = convert_to_negative(img)
# show result image
imshow("negative of original image", img)
waitKey(0)
destroyAllWindows()
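    # Editor's note (illustrative): `[2_55, 2_55, 2_55] - img[i][j]` relies on NumPy
    # broadcasting, since cva's `imread` returns a numpy.ndarray; on a plain Python
    # list the subtraction would raise a TypeError.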
| 673 |
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
"pipelines_utils",
"0.22.0",
"Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.",
standard_warn=False,
stacklevel=3,
)
| 673 | 1 |
from __future__ import annotations
from collections import deque
class UpperCamelCase :
    def __init__( self , keywords : list[str] ):
        """simple docstring"""
        self.adlist = []
        self.adlist.append(
            {'value': '', 'next_states': [], 'fail_state': 0, 'output': []} )
        for keyword in keywords:
            self.add_keyword(keyword )
        self.set_fail_transitions()
    def find_next_state( self , current_state : int , char : str ):
        """simple docstring"""
        for state in self.adlist[current_state]["next_states"]:
            if char == self.adlist[state]["value"]:
                return state
        return None
    def add_keyword( self , keyword : str ):
        """simple docstring"""
        current_state = 0
        for character in keyword:
            next_state = self.find_next_state(current_state , character )
            if next_state is None:
                self.adlist.append(
                    {
                        'value': character,
                        'next_states': [],
                        'fail_state': 0,
                        'output': [],
                    } )
                self.adlist[current_state]["next_states"].append(len(self.adlist ) - 1 )
                current_state = len(self.adlist ) - 1
            else:
                current_state = next_state
        self.adlist[current_state]["output"].append(keyword )
    def set_fail_transitions( self ):
        """simple docstring"""
        q = deque()
        for node in self.adlist[0]["next_states"]:
            q.append(node )
            self.adlist[node]["fail_state"] = 0
        while q:
            r = q.popleft()
            for child in self.adlist[r]["next_states"]:
                q.append(child )
                state = self.adlist[r]['fail_state']
                while (
                    self.find_next_state(state , self.adlist[child]['value'] ) is None
                    and state != 0
                ):
                    state = self.adlist[state]['fail_state']
                self.adlist[child]["fail_state"] = self.find_next_state(
                    state , self.adlist[child]['value'] )
                if self.adlist[child]["fail_state"] is None:
                    self.adlist[child]["fail_state"] = 0
                self.adlist[child]["output"] = (
                    self.adlist[child]['output']
                    + self.adlist[self.adlist[child]['fail_state']]['output']
                )
    def search_in( self , string : str ):
        """simple docstring"""
        result = {}  # returns a dict with keywords and list of its occurrences
        current_state = 0
        for i in range(len(string ) ):
            while (
                self.find_next_state(current_state , string[i] ) is None
                and current_state != 0
            ):
                current_state = self.adlist[current_state]['fail_state']
            next_state = self.find_next_state(current_state , string[i] )
            if next_state is None:
                current_state = 0
            else:
                current_state = next_state
            for key in self.adlist[current_state]["output"]:
                if key not in result:
                    result[key] = []
                result[key].append(i - len(key ) + 1 )
        return result
if __name__ == "__main__":
import doctest
doctest.testmod()
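    # Minimal usage sketch (editor's addition; expected output is illustrative):
    # automaton = UpperCamelCase(['what', 'hat', 'ver', 'er'])
    # print(automaton.search_in('whatever, err ... , wherever'))
    # # {'what': [0], 'hat': [1], 'ver': [5, 25], 'er': [6, 10, 22, 26]}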
| 673 |
import gc
import unittest
import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
ClapTextConfig,
ClapTextModelWithProjection,
RobertaTokenizer,
SpeechTaHifiGan,
SpeechTaHifiGanConfig,
)
from diffusers import (
AudioLDMPipeline,
AutoencoderKL,
DDIMScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class UpperCamelCase ( PipelineTesterMixin , unittest.TestCase ):
__UpperCamelCase =AudioLDMPipeline
__UpperCamelCase =TEXT_TO_AUDIO_PARAMS
__UpperCamelCase =TEXT_TO_AUDIO_BATCH_PARAMS
__UpperCamelCase =frozenset(
[
"num_inference_steps",
"num_waveforms_per_prompt",
"generator",
"latents",
"output_type",
"return_dict",
"callback",
"callback_steps",
] )
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=(3_2, 6_4) , class_embed_type='simple_projection' , projection_class_embeddings_input_dim=3_2 , class_embeddings_concat=snake_case__ , )
SCREAMING_SNAKE_CASE = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=snake_case__ , set_alpha_to_one=snake_case__ , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=1 , out_channels=1 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = ClapTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , projection_dim=3_2 , )
SCREAMING_SNAKE_CASE = ClapTextModelWithProjection(snake_case__ )
SCREAMING_SNAKE_CASE = RobertaTokenizer.from_pretrained('hf-internal-testing/tiny-random-roberta' , model_max_length=7_7 )
SCREAMING_SNAKE_CASE = SpeechTaHifiGanConfig(
model_in_dim=8 , sampling_rate=1_6_0_0_0 , upsample_initial_channel=1_6 , upsample_rates=[2, 2] , upsample_kernel_sizes=[4, 4] , resblock_kernel_sizes=[3, 7] , resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]] , normalize_before=snake_case__ , )
SCREAMING_SNAKE_CASE = SpeechTaHifiGan(snake_case__ )
SCREAMING_SNAKE_CASE = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'vocoder': vocoder,
}
return components
def UpperCamelCase ( self : Optional[int] , snake_case__ : int , snake_case__ : int=0 ):
"""simple docstring"""
if str(snake_case__ ).startswith('mps' ):
SCREAMING_SNAKE_CASE = torch.manual_seed(snake_case__ )
else:
SCREAMING_SNAKE_CASE = torch.Generator(device=snake_case__ ).manual_seed(snake_case__ )
SCREAMING_SNAKE_CASE = {
'prompt': 'A hammer hitting a wooden surface',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
}
return inputs
def UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = 'cpu' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE = self.get_dummy_components()
SCREAMING_SNAKE_CASE = AudioLDMPipeline(**snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
audioldm_pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = self.get_dummy_inputs(snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe(**snake_case__ )
SCREAMING_SNAKE_CASE = output.audios[0]
assert audio.ndim == 1
assert len(snake_case__ ) == 2_5_6
SCREAMING_SNAKE_CASE = audio[:1_0]
SCREAMING_SNAKE_CASE = np.array(
[-0.0_050, 0.0_050, -0.0_060, 0.0_033, -0.0_026, 0.0_033, -0.0_027, 0.0_033, -0.0_028, 0.0_033] )
assert np.abs(audio_slice - expected_slice ).max() < 1E-2
def UpperCamelCase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.get_dummy_components()
SCREAMING_SNAKE_CASE = AudioLDMPipeline(**snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
audioldm_pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = self.get_dummy_inputs(snake_case__ )
SCREAMING_SNAKE_CASE = 3 * [inputs['prompt']]
# forward
SCREAMING_SNAKE_CASE = audioldm_pipe(**snake_case__ )
SCREAMING_SNAKE_CASE = output.audios[0]
SCREAMING_SNAKE_CASE = self.get_dummy_inputs(snake_case__ )
SCREAMING_SNAKE_CASE = 3 * [inputs.pop('prompt' )]
SCREAMING_SNAKE_CASE = audioldm_pipe.tokenizer(
snake_case__ , padding='max_length' , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=snake_case__ , return_tensors='pt' , )
SCREAMING_SNAKE_CASE = text_inputs['input_ids'].to(snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.text_encoder(
snake_case__ , )
SCREAMING_SNAKE_CASE = prompt_embeds.text_embeds
# additional L_2 normalization over each hidden-state
SCREAMING_SNAKE_CASE = F.normalize(snake_case__ , dim=-1 )
SCREAMING_SNAKE_CASE = prompt_embeds
# forward
SCREAMING_SNAKE_CASE = audioldm_pipe(**snake_case__ )
SCREAMING_SNAKE_CASE = output.audios[0]
assert np.abs(audio_a - audio_a ).max() < 1E-2
def UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.get_dummy_components()
SCREAMING_SNAKE_CASE = AudioLDMPipeline(**snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
audioldm_pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = self.get_dummy_inputs(snake_case__ )
SCREAMING_SNAKE_CASE = 3 * ['this is a negative prompt']
SCREAMING_SNAKE_CASE = negative_prompt
SCREAMING_SNAKE_CASE = 3 * [inputs['prompt']]
# forward
SCREAMING_SNAKE_CASE = audioldm_pipe(**snake_case__ )
SCREAMING_SNAKE_CASE = output.audios[0]
SCREAMING_SNAKE_CASE = self.get_dummy_inputs(snake_case__ )
SCREAMING_SNAKE_CASE = 3 * [inputs.pop('prompt' )]
SCREAMING_SNAKE_CASE = []
for p in [prompt, negative_prompt]:
SCREAMING_SNAKE_CASE = audioldm_pipe.tokenizer(
snake_case__ , padding='max_length' , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=snake_case__ , return_tensors='pt' , )
SCREAMING_SNAKE_CASE = text_inputs['input_ids'].to(snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.text_encoder(
snake_case__ , )
SCREAMING_SNAKE_CASE = text_embeds.text_embeds
# additional L_2 normalization over each hidden-state
SCREAMING_SNAKE_CASE = F.normalize(snake_case__ , dim=-1 )
embeds.append(snake_case__ )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = embeds
# forward
SCREAMING_SNAKE_CASE = audioldm_pipe(**snake_case__ )
SCREAMING_SNAKE_CASE = output.audios[0]
assert np.abs(audio_a - audio_a ).max() < 1E-2
def UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = 'cpu' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE = self.get_dummy_components()
SCREAMING_SNAKE_CASE = PNDMScheduler(skip_prk_steps=snake_case__ )
SCREAMING_SNAKE_CASE = AudioLDMPipeline(**snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
audioldm_pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = self.get_dummy_inputs(snake_case__ )
SCREAMING_SNAKE_CASE = 'egg cracking'
SCREAMING_SNAKE_CASE = audioldm_pipe(**snake_case__ , negative_prompt=snake_case__ )
SCREAMING_SNAKE_CASE = output.audios[0]
assert audio.ndim == 1
assert len(snake_case__ ) == 2_5_6
SCREAMING_SNAKE_CASE = audio[:1_0]
SCREAMING_SNAKE_CASE = np.array(
[-0.0_051, 0.0_050, -0.0_060, 0.0_034, -0.0_026, 0.0_033, -0.0_027, 0.0_033, -0.0_028, 0.0_032] )
assert np.abs(audio_slice - expected_slice ).max() < 1E-2
def UpperCamelCase ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = 'cpu' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE = self.get_dummy_components()
SCREAMING_SNAKE_CASE = PNDMScheduler(skip_prk_steps=snake_case__ )
SCREAMING_SNAKE_CASE = AudioLDMPipeline(**snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
audioldm_pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = 'A hammer hitting a wooden surface'
# test num_waveforms_per_prompt=1 (default)
SCREAMING_SNAKE_CASE = audioldm_pipe(snake_case__ , num_inference_steps=2 ).audios
assert audios.shape == (1, 2_5_6)
# test num_waveforms_per_prompt=1 (default) for batch of prompts
SCREAMING_SNAKE_CASE = 2
SCREAMING_SNAKE_CASE = audioldm_pipe([prompt] * batch_size , num_inference_steps=2 ).audios
assert audios.shape == (batch_size, 2_5_6)
# test num_waveforms_per_prompt for single prompt
SCREAMING_SNAKE_CASE = 2
SCREAMING_SNAKE_CASE = audioldm_pipe(snake_case__ , num_inference_steps=2 , num_waveforms_per_prompt=snake_case__ ).audios
assert audios.shape == (num_waveforms_per_prompt, 2_5_6)
# test num_waveforms_per_prompt for batch of prompts
SCREAMING_SNAKE_CASE = 2
SCREAMING_SNAKE_CASE = audioldm_pipe(
[prompt] * batch_size , num_inference_steps=2 , num_waveforms_per_prompt=snake_case__ ).audios
assert audios.shape == (batch_size * num_waveforms_per_prompt, 2_5_6)
def UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = 'cpu' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE = self.get_dummy_components()
SCREAMING_SNAKE_CASE = AudioLDMPipeline(**snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
audioldm_pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.vocoder.config.sampling_rate
SCREAMING_SNAKE_CASE = self.get_dummy_inputs(snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe(audio_length_in_s=0.016 , **snake_case__ )
SCREAMING_SNAKE_CASE = output.audios[0]
assert audio.ndim == 1
assert len(snake_case__ ) / vocoder_sampling_rate == 0.016
SCREAMING_SNAKE_CASE = audioldm_pipe(audio_length_in_s=0.032 , **snake_case__ )
SCREAMING_SNAKE_CASE = output.audios[0]
assert audio.ndim == 1
assert len(snake_case__ ) / vocoder_sampling_rate == 0.032
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.get_dummy_components()
SCREAMING_SNAKE_CASE = AudioLDMPipeline(**snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
audioldm_pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = ['hey']
SCREAMING_SNAKE_CASE = audioldm_pipe(snake_case__ , num_inference_steps=1 )
SCREAMING_SNAKE_CASE = output.audios.shape
assert audio_shape == (1, 2_5_6)
SCREAMING_SNAKE_CASE = audioldm_pipe.vocoder.config
config.model_in_dim *= 2
SCREAMING_SNAKE_CASE = SpeechTaHifiGan(snake_case__ ).to(snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe(snake_case__ , num_inference_steps=1 )
SCREAMING_SNAKE_CASE = output.audios.shape
# waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
assert audio_shape == (1, 2_5_6)
def UpperCamelCase ( self : Tuple ):
"""simple docstring"""
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=snake_case__ )
def UpperCamelCase ( self : int ):
"""simple docstring"""
self._test_inference_batch_single_identical(test_mean_pixel_difference=snake_case__ )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def UpperCamelCase ( self : Dict ):
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=snake_case__ )
@slow
class UpperCamelCase ( unittest.TestCase ):
def UpperCamelCase ( self : Any ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase ( self : int , snake_case__ : int , snake_case__ : Tuple="cpu" , snake_case__ : List[str]=torch.floataa , snake_case__ : Optional[Any]=0 ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = torch.Generator(device=snake_case__ ).manual_seed(snake_case__ )
SCREAMING_SNAKE_CASE = np.random.RandomState(snake_case__ ).standard_normal((1, 8, 1_2_8, 1_6) )
SCREAMING_SNAKE_CASE = torch.from_numpy(snake_case__ ).to(device=snake_case__ , dtype=snake_case__ )
SCREAMING_SNAKE_CASE = {
'prompt': 'A hammer hitting a wooden surface',
'latents': latents,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 2.5,
}
return inputs
def UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = AudioLDMPipeline.from_pretrained('cvssp/audioldm' )
SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
audioldm_pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = self.get_inputs(snake_case__ )
SCREAMING_SNAKE_CASE = 2_5
SCREAMING_SNAKE_CASE = audioldm_pipe(**snake_case__ ).audios[0]
assert audio.ndim == 1
assert len(snake_case__ ) == 8_1_9_2_0
SCREAMING_SNAKE_CASE = audio[7_7_2_3_0:7_7_2_4_0]
SCREAMING_SNAKE_CASE = np.array(
[-0.4_884, -0.4_607, 0.0_023, 0.5_007, 0.5_896, 0.5_151, 0.3_813, -0.0_208, -0.3_687, -0.4_315] )
SCREAMING_SNAKE_CASE = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 1E-2
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = AudioLDMPipeline.from_pretrained('cvssp/audioldm' )
SCREAMING_SNAKE_CASE = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config )
SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
audioldm_pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = self.get_inputs(snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe(**snake_case__ ).audios[0]
assert audio.ndim == 1
assert len(snake_case__ ) == 8_1_9_2_0
SCREAMING_SNAKE_CASE = audio[2_7_7_8_0:2_7_7_9_0]
SCREAMING_SNAKE_CASE = np.array([-0.2_131, -0.0_873, -0.0_124, -0.0_189, 0.0_569, 0.1_373, 0.1_883, 0.2_886, 0.3_297, 0.2_212] )
SCREAMING_SNAKE_CASE = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 3E-2
| 673 | 1 |
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {"configuration_dpt": ["DPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DPTConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_dpt"] = ["DPTFeatureExtractor"]
    _import_structure["image_processing_dpt"] = ["DPTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_dpt"] = [
"DPT_PRETRAINED_MODEL_ARCHIVE_LIST",
"DPTForDepthEstimation",
"DPTForSemanticSegmentation",
"DPTModel",
"DPTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 673 |
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class UpperCamelCase :
def __init__( self : Dict , snake_case__ : str , snake_case__ : str=1_3 , snake_case__ : Tuple=7 , snake_case__ : Tuple=True , snake_case__ : Tuple=True , snake_case__ : List[str]=False , snake_case__ : Any=True , snake_case__ : Union[str, Any]=9_9 , snake_case__ : Dict=3_2 , snake_case__ : Optional[Any]=5 , snake_case__ : Optional[Any]=4 , snake_case__ : Union[str, Any]=3_7 , snake_case__ : Tuple="gelu" , snake_case__ : Dict=0.1 , snake_case__ : Any=0.1 , snake_case__ : int=5_1_2 , snake_case__ : Dict=1_6 , snake_case__ : str=2 , snake_case__ : Any=0.02 , snake_case__ : List[str]=3 , snake_case__ : int=4 , snake_case__ : List[str]=None , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = parent
SCREAMING_SNAKE_CASE = batch_size
SCREAMING_SNAKE_CASE = seq_length
SCREAMING_SNAKE_CASE = is_training
SCREAMING_SNAKE_CASE = use_input_mask
SCREAMING_SNAKE_CASE = use_token_type_ids
SCREAMING_SNAKE_CASE = use_labels
SCREAMING_SNAKE_CASE = vocab_size
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = intermediate_size
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = hidden_dropout_prob
SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE = max_position_embeddings
SCREAMING_SNAKE_CASE = type_vocab_size
SCREAMING_SNAKE_CASE = type_sequence_label_size
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = num_labels
SCREAMING_SNAKE_CASE = num_choices
SCREAMING_SNAKE_CASE = scope
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
if self.use_labels:
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_choices )
SCREAMING_SNAKE_CASE = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase ( self : Dict ):
"""simple docstring"""
return OpenLlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=snake_case__ , initializer_range=self.initializer_range , use_stable_embedding=snake_case__ , )
def UpperCamelCase ( self : int , snake_case__ : Tuple , snake_case__ : str , snake_case__ : Optional[Any] , snake_case__ : Union[str, Any] , snake_case__ : List[str] , snake_case__ : Any , snake_case__ : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = OpenLlamaModel(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ )
SCREAMING_SNAKE_CASE = model(snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase ( self : Optional[int] , snake_case__ : Dict , snake_case__ : List[Any] , snake_case__ : str , snake_case__ : str , snake_case__ : int , snake_case__ : Union[str, Any] , snake_case__ : Optional[int] , snake_case__ : List[Any] , snake_case__ : str , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = OpenLlamaModel(snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(
snake_case__ , attention_mask=snake_case__ , encoder_hidden_states=snake_case__ , encoder_attention_mask=snake_case__ , )
SCREAMING_SNAKE_CASE = model(
snake_case__ , attention_mask=snake_case__ , encoder_hidden_states=snake_case__ , )
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase ( self : Dict , snake_case__ : Union[str, Any] , snake_case__ : int , snake_case__ : int , snake_case__ : List[str] , snake_case__ : Optional[Any] , snake_case__ : Optional[Any] , snake_case__ : Optional[int] , snake_case__ : Optional[int] , snake_case__ : Tuple , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = OpenLlamaForCausalLM(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase ( self : Optional[int] , snake_case__ : Union[str, Any] , snake_case__ : List[Any] , snake_case__ : Any , snake_case__ : int , snake_case__ : Any , snake_case__ : Union[str, Any] , snake_case__ : int , snake_case__ : str , snake_case__ : Union[str, Any] , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = OpenLlamaForCausalLM(config=snake_case__ )
model.to(snake_case__ )
model.eval()
# first forward pass
SCREAMING_SNAKE_CASE = model(
snake_case__ , attention_mask=snake_case__ , encoder_hidden_states=snake_case__ , encoder_attention_mask=snake_case__ , use_cache=snake_case__ , )
SCREAMING_SNAKE_CASE = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
SCREAMING_SNAKE_CASE = ids_tensor((self.batch_size, 3) , config.vocab_size )
SCREAMING_SNAKE_CASE = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
SCREAMING_SNAKE_CASE = torch.cat([input_ids, next_tokens] , dim=-1 )
SCREAMING_SNAKE_CASE = torch.cat([input_mask, next_mask] , dim=-1 )
SCREAMING_SNAKE_CASE = model(
snake_case__ , attention_mask=snake_case__ , encoder_hidden_states=snake_case__ , encoder_attention_mask=snake_case__ , output_hidden_states=snake_case__ , )['hidden_states'][0]
SCREAMING_SNAKE_CASE = model(
snake_case__ , attention_mask=snake_case__ , encoder_hidden_states=snake_case__ , encoder_attention_mask=snake_case__ , past_key_values=snake_case__ , output_hidden_states=snake_case__ , )['hidden_states'][0]
# select random slice
SCREAMING_SNAKE_CASE = ids_tensor((1,) , output_from_past.shape[-1] ).item()
SCREAMING_SNAKE_CASE = output_from_no_past[:, -3:, random_slice_idx].detach()
SCREAMING_SNAKE_CASE = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(snake_case__ , snake_case__ , atol=1E-3 ) )
def UpperCamelCase ( self : Any ):
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class UpperCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ):
__UpperCamelCase =(
(OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
)
__UpperCamelCase =(OpenLlamaForCausalLM,) if is_torch_available() else ()
__UpperCamelCase =(
{
"feature-extraction": OpenLlamaModel,
"text-classification": OpenLlamaForSequenceClassification,
"text-generation": OpenLlamaForCausalLM,
"zero-shot": OpenLlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
__UpperCamelCase =False
__UpperCamelCase =False
def UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = OpenLlamaModelTester(self )
SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=snake_case__ , hidden_size=3_7 )
def UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCamelCase ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case__ )
def UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
SCREAMING_SNAKE_CASE = type
self.model_tester.create_and_check_model(*snake_case__ )
def UpperCamelCase ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE = 3
SCREAMING_SNAKE_CASE = input_dict['input_ids']
SCREAMING_SNAKE_CASE = input_ids.ne(1 ).to(snake_case__ )
SCREAMING_SNAKE_CASE = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
SCREAMING_SNAKE_CASE = OpenLlamaForSequenceClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ , labels=snake_case__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE = 3
SCREAMING_SNAKE_CASE = 'single_label_classification'
SCREAMING_SNAKE_CASE = input_dict['input_ids']
SCREAMING_SNAKE_CASE = input_ids.ne(1 ).to(snake_case__ )
SCREAMING_SNAKE_CASE = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
SCREAMING_SNAKE_CASE = OpenLlamaForSequenceClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ , labels=snake_case__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE = 3
SCREAMING_SNAKE_CASE = 'multi_label_classification'
SCREAMING_SNAKE_CASE = input_dict['input_ids']
SCREAMING_SNAKE_CASE = input_ids.ne(1 ).to(snake_case__ )
SCREAMING_SNAKE_CASE = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
SCREAMING_SNAKE_CASE = OpenLlamaForSequenceClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ , labels=snake_case__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip('Open-Llama buffers include complex numbers, which breaks this test' )
def UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
pass
@parameterized.expand([('linear',), ('dynamic',)] )
def UpperCamelCase ( self : str , snake_case__ : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE = ids_tensor([1, 1_0] , config.vocab_size )
SCREAMING_SNAKE_CASE = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights
SCREAMING_SNAKE_CASE = OpenLlamaModel(snake_case__ )
original_model.to(snake_case__ )
original_model.eval()
SCREAMING_SNAKE_CASE = original_model(snake_case__ ).last_hidden_state
SCREAMING_SNAKE_CASE = original_model(snake_case__ ).last_hidden_state
set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights
SCREAMING_SNAKE_CASE = {'type': scaling_type, 'factor': 10.0}
SCREAMING_SNAKE_CASE = OpenLlamaModel(snake_case__ )
scaled_model.to(snake_case__ )
scaled_model.eval()
SCREAMING_SNAKE_CASE = scaled_model(snake_case__ ).last_hidden_state
SCREAMING_SNAKE_CASE = scaled_model(snake_case__ ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(snake_case__ , snake_case__ , atol=1E-5 ) )
else:
self.assertFalse(torch.allclose(snake_case__ , snake_case__ , atol=1E-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(snake_case__ , snake_case__ , atol=1E-5 ) )
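# A standalone sketch (not part of the test class above) of the cached-vs-uncached
# equivalence pattern the decoder-with-past test verifies, using GPT-2 as a widely
# available stand-in model; the checkpoint name and the 1e-4 tolerance are
# illustrative assumptions.
if __name__ == "__main__":
    import torch
    from transformers import GPT2LMHeadModel, GPT2TokenizerFast

    tok = GPT2TokenizerFast.from_pretrained("gpt2")
    model = GPT2LMHeadModel.from_pretrained("gpt2").eval()
    ids = tok("The quick brown fox", return_tensors="pt").input_ids
    with torch.no_grad():
        prefix = model(ids[:, :-1], use_cache=True)  # prefix pass populates the KV cache
        cached = model(ids[:, -1:], past_key_values=prefix.past_key_values).logits
        full = model(ids).logits[:, -1:, :]  # same final position, recomputed without a cache
    assert torch.allclose(cached, full, atol=1e-4)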
| 673 | 1 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
a_ : Optional[int] = logging.get_logger(__name__)
def __lowerCAmelCase ( _UpperCamelCase : Optional[int] , _UpperCamelCase : int=False ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"""blocks.{i}.norm1.weight""", f"""deit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((f"""blocks.{i}.norm1.bias""", f"""deit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((f"""blocks.{i}.attn.proj.weight""", f"""deit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.attn.proj.bias""", f"""deit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((f"""blocks.{i}.norm2.weight""", f"""deit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((f"""blocks.{i}.norm2.bias""", f"""deit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.weight""", f"""deit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.bias""", f"""deit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.weight""", f"""deit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.bias""", f"""deit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
('cls_token', 'deit.embeddings.cls_token'),
('dist_token', 'deit.embeddings.distillation_token'),
('patch_embed.proj.weight', 'deit.embeddings.patch_embeddings.projection.weight'),
('patch_embed.proj.bias', 'deit.embeddings.patch_embeddings.projection.bias'),
('pos_embed', 'deit.embeddings.position_embeddings'),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
('pre_logits.fc.weight', 'pooler.dense.weight'),
('pre_logits.fc.bias', 'pooler.dense.bias'),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
SCREAMING_SNAKE_CASE = [(pair[0], pair[1][4:]) if pair[1].startswith('deit' ) else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
('norm.weight', 'deit.layernorm.weight'),
('norm.bias', 'deit.layernorm.bias'),
('head.weight', 'cls_classifier.weight'),
('head.bias', 'cls_classifier.bias'),
('head_dist.weight', 'distillation_classifier.weight'),
('head_dist.bias', 'distillation_classifier.bias'),
] )
return rename_keys
def __lowerCAmelCase ( _UpperCamelCase : Dict , _UpperCamelCase : str , _UpperCamelCase : Tuple=False ) -> Optional[int]:
'''simple docstring'''
for i in range(config.num_hidden_layers ):
if base_model:
SCREAMING_SNAKE_CASE = ''
else:
SCREAMING_SNAKE_CASE = 'deit.'
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
SCREAMING_SNAKE_CASE = state_dict.pop(f"""blocks.{i}.attn.qkv.weight""" )
SCREAMING_SNAKE_CASE = state_dict.pop(f"""blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
SCREAMING_SNAKE_CASE = in_proj_weight[
: config.hidden_size, :
]
SCREAMING_SNAKE_CASE = in_proj_bias[: config.hidden_size]
SCREAMING_SNAKE_CASE = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
SCREAMING_SNAKE_CASE = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
SCREAMING_SNAKE_CASE = in_proj_weight[
-config.hidden_size :, :
]
SCREAMING_SNAKE_CASE = in_proj_bias[-config.hidden_size :]
def __lowerCAmelCase ( _UpperCamelCase : Tuple , _UpperCamelCase : Optional[int] , _UpperCamelCase : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = dct.pop(_UpperCamelCase )
SCREAMING_SNAKE_CASE = val
def __lowerCAmelCase ( ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE = 'http://images.cocodataset.org/val2017/000000039769.jpg'
SCREAMING_SNAKE_CASE = Image.open(requests.get(_UpperCamelCase , stream=_UpperCamelCase ).raw )
return im
@torch.no_grad()
def __lowerCAmelCase ( _UpperCamelCase : Dict , _UpperCamelCase : Union[str, Any] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE = DeiTConfig()
# all deit models have fine-tuned heads
SCREAMING_SNAKE_CASE = False
# dataset (fine-tuned on ImageNet 2012), patch_size and image_size
SCREAMING_SNAKE_CASE = 10_00
SCREAMING_SNAKE_CASE = 'huggingface/label-files'
SCREAMING_SNAKE_CASE = 'imagenet-1k-id2label.json'
SCREAMING_SNAKE_CASE = json.load(open(hf_hub_download(_UpperCamelCase , _UpperCamelCase , repo_type='dataset' ) , 'r' ) )
SCREAMING_SNAKE_CASE = {int(_UpperCamelCase ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE = idalabel
SCREAMING_SNAKE_CASE = {v: k for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE = int(deit_name[-6:-4] )
SCREAMING_SNAKE_CASE = int(deit_name[-3:] )
# size of the architecture
if deit_name[9:].startswith('tiny' ):
SCREAMING_SNAKE_CASE = 1_92
SCREAMING_SNAKE_CASE = 7_68
SCREAMING_SNAKE_CASE = 12
SCREAMING_SNAKE_CASE = 3
elif deit_name[9:].startswith('small' ):
SCREAMING_SNAKE_CASE = 3_84
SCREAMING_SNAKE_CASE = 15_36
SCREAMING_SNAKE_CASE = 12
SCREAMING_SNAKE_CASE = 6
if deit_name[9:].startswith('base' ):
pass
elif deit_name[4:].startswith('large' ):
SCREAMING_SNAKE_CASE = 10_24
SCREAMING_SNAKE_CASE = 40_96
SCREAMING_SNAKE_CASE = 24
SCREAMING_SNAKE_CASE = 16
# load original model from timm
SCREAMING_SNAKE_CASE = timm.create_model(_UpperCamelCase , pretrained=_UpperCamelCase )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
SCREAMING_SNAKE_CASE = timm_model.state_dict()
SCREAMING_SNAKE_CASE = create_rename_keys(_UpperCamelCase , _UpperCamelCase )
for src, dest in rename_keys:
rename_key(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
read_in_q_k_v(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# load HuggingFace model
SCREAMING_SNAKE_CASE = DeiTForImageClassificationWithTeacher(_UpperCamelCase ).eval()
model.load_state_dict(_UpperCamelCase )
# Check outputs on an image, prepared by DeiTImageProcessor
SCREAMING_SNAKE_CASE = int(
(2_56 / 2_24) * config.image_size ) # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
SCREAMING_SNAKE_CASE = DeiTImageProcessor(size=_UpperCamelCase , crop_size=config.image_size )
SCREAMING_SNAKE_CASE = image_processor(images=prepare_img() , return_tensors='pt' )
SCREAMING_SNAKE_CASE = encoding['pixel_values']
SCREAMING_SNAKE_CASE = model(_UpperCamelCase )
SCREAMING_SNAKE_CASE = timm_model(_UpperCamelCase )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(_UpperCamelCase , outputs.logits , atol=1e-3 )
Path(_UpperCamelCase ).mkdir(exist_ok=_UpperCamelCase )
print(f"""Saving model {deit_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(_UpperCamelCase )
print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(_UpperCamelCase )
if __name__ == "__main__":
a_ : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--deit_name",
default="vit_deit_base_distilled_patch16_224",
type=str,
help="Name of the DeiT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
a_ : Optional[Any] = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
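    # Optional post-conversion sanity check (a sketch: assumes the folder written above
    # loads cleanly and reuses the same COCO test image as `prepare_img`).
    processor = DeiTImageProcessor.from_pretrained(args.pytorch_dump_folder_path)
    model = DeiTForImageClassificationWithTeacher.from_pretrained(args.pytorch_dump_folder_path)
    inputs = processor(images=prepare_img(), return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits
    print("Predicted class:", model.config.id2label[logits.argmax(-1).item()])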
| 673 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class UpperCamelCase ( SCREAMING_SNAKE_CASE ):
__UpperCamelCase ="openai/whisper-base"
__UpperCamelCase =(
"This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
"transcribed text."
)
__UpperCamelCase ="transcriber"
__UpperCamelCase =WhisperProcessor
__UpperCamelCase =WhisperForConditionalGeneration
__UpperCamelCase =["audio"]
__UpperCamelCase =["text"]
def UpperCamelCase ( self : Dict , snake_case__ : Tuple ):
"""simple docstring"""
return self.pre_processor(snake_case__ , return_tensors='pt' ).input_features
def UpperCamelCase ( self : Optional[int] , snake_case__ : Tuple ):
"""simple docstring"""
return self.model.generate(inputs=snake_case__ )
def UpperCamelCase ( self : str , snake_case__ : Union[str, Any] ):
"""simple docstring"""
return self.pre_processor.batch_decode(snake_case__ , skip_special_tokens=snake_case__ )[0]
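# A minimal usage sketch of this tool as it exists upstream in transformers
# (`SpeechToTextTool`); the import path and the silent placeholder clip are
# assumptions for illustration:
#
#   import numpy as np
#   from transformers.tools import SpeechToTextTool
#
#   transcriber = SpeechToTextTool()
#   audio = np.zeros(16_000, dtype=np.float32)  # 1 s of 16 kHz silence as placeholder input
#   print(transcriber(audio))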
| 673 | 1 |
from manim import *
class UpperCamelCase ( SCREAMING_SNAKE_CASE ):
def UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = Rectangle(height=0.5 , width=0.5 )
SCREAMING_SNAKE_CASE = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
SCREAMING_SNAKE_CASE = [mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE = [mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE = VGroup(*snake_case__ ).arrange(snake_case__ , buff=0 )
SCREAMING_SNAKE_CASE = VGroup(*snake_case__ ).arrange(snake_case__ , buff=0 )
SCREAMING_SNAKE_CASE = VGroup(snake_case__ , snake_case__ ).arrange(snake_case__ , buff=0 )
SCREAMING_SNAKE_CASE = Text('CPU' , font_size=2_4 )
SCREAMING_SNAKE_CASE = Group(snake_case__ , snake_case__ ).arrange(snake_case__ , buff=0.5 , aligned_edge=snake_case__ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(snake_case__ )
SCREAMING_SNAKE_CASE = [mem.copy() for i in range(1 )]
SCREAMING_SNAKE_CASE = VGroup(*snake_case__ ).arrange(snake_case__ , buff=0 )
SCREAMING_SNAKE_CASE = Text('GPU' , font_size=2_4 )
SCREAMING_SNAKE_CASE = Group(snake_case__ , snake_case__ ).arrange(snake_case__ , buff=0.5 , aligned_edge=snake_case__ )
gpu.align_to(snake_case__ , snake_case__ )
gpu.set_x(gpu.get_x() - 1 )
self.add(snake_case__ )
SCREAMING_SNAKE_CASE = [mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE = VGroup(*snake_case__ ).arrange(snake_case__ , buff=0 )
SCREAMING_SNAKE_CASE = Text('Model' , font_size=2_4 )
SCREAMING_SNAKE_CASE = Group(snake_case__ , snake_case__ ).arrange(snake_case__ , buff=0.5 , aligned_edge=snake_case__ )
model.move_to([3, -1.0, 0] )
self.play(
Create(snake_case__ , run_time=1 ) , Create(snake_case__ , run_time=1 ) , Create(snake_case__ , run_time=1 ) , )
SCREAMING_SNAKE_CASE = MarkupText(
F"""First, an empty model skeleton is loaded\ninto <span fgcolor='{YELLOW}'>memory</span> without using much RAM.""" , font_size=2_4 , )
SCREAMING_SNAKE_CASE = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
SCREAMING_SNAKE_CASE = MarkupText(
F"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=1_8 , )
key_text.move_to([-5, 2.4, 0] )
step_a.move_to([2, 2, 0] )
self.play(Write(snake_case__ , run_time=2.5 ) , Write(snake_case__ ) , Write(snake_case__ ) )
self.add(snake_case__ )
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = []
for i, rect in enumerate(snake_case__ ):
SCREAMING_SNAKE_CASE = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(snake_case__ , opacity=0.7 )
cpu_target.move_to(snake_case__ )
cpu_target.generate_target()
SCREAMING_SNAKE_CASE = 0.46 / 4
SCREAMING_SNAKE_CASE = 0.46 / 3
if i == 0:
cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=snake_case__ )
cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 )
elif i == 3:
cpu_target.target.next_to(cpu_targs[0].target , direction=snake_case__ , buff=0.0 )
else:
cpu_target.target.next_to(cpu_targs[i - 1].target , direction=snake_case__ , buff=0.0 )
cpu_targs.append(snake_case__ )
first_animations.append(rect.animate(run_time=0.5 ).set_stroke(snake_case__ ) )
second_animations.append(MoveToTarget(snake_case__ , run_time=1.5 ) )
self.play(*snake_case__ )
self.play(*snake_case__ )
self.wait()
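# To preview the scene above with manim's CLI (the file name `stage.py` is an
# assumption; `-pql` renders at low quality and opens the result, enough to check layout):
#
#   manim -pql stage.py UpperCamelCase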
| 673 |
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
a_ : List[str] = version.parse(importlib_metadata.version("nltk"))
if NLTK_VERSION >= version.Version("3.6.4"):
from nltk import word_tokenize
a_ : Dict = "\\n@inproceedings{banarjee2005,\n title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n author = {Banerjee, Satanjeev and Lavie, Alon},\n booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n month = jun,\n year = {2005},\n address = {Ann Arbor, Michigan},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W05-0909},\n pages = {65--72},\n}\n"
a_ : str = "\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n"
a_ : int = "\nComputes METEOR score of translated segments against one or more references.\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n gamma: Relative weight assigned to fragmentation penalty. default: 0.5\nReturns:\n 'meteor': meteor score.\nExamples:\n\n >>> meteor = datasets.load_metric('meteor')\n >>> predictions = [\"It is a guide to action which ensures that the military always obeys the commands of the party\"]\n >>> references = [\"It is a guide to action that ensures that the military will forever heed Party commands\"]\n >>> results = meteor.compute(predictions=predictions, references=references)\n >>> print(round(results[\"meteor\"], 4))\n 0.6944\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCamelCase ( datasets.Metric ):
def UpperCamelCase ( self : str ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Value('string' , id='sequence' ),
} ) , codebase_urls=['https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py'] , reference_urls=[
'https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score',
'https://en.wikipedia.org/wiki/METEOR',
] , )
def UpperCamelCase ( self : Dict , snake_case__ : int ):
"""simple docstring"""
import nltk
nltk.download('wordnet' )
if NLTK_VERSION >= version.Version('3.6.5' ):
nltk.download('punkt' )
if NLTK_VERSION >= version.Version('3.6.6' ):
nltk.download('omw-1.4' )
def UpperCamelCase ( self : Union[str, Any] , snake_case__ : str , snake_case__ : List[Any] , snake_case__ : List[Any]=0.9 , snake_case__ : Optional[Any]=3 , snake_case__ : Any=0.5 ):
"""simple docstring"""
if NLTK_VERSION >= version.Version('3.6.5' ):
SCREAMING_SNAKE_CASE = [
meteor_score.single_meteor_score(
word_tokenize(snake_case__ ) , word_tokenize(snake_case__ ) , alpha=snake_case__ , beta=snake_case__ , gamma=snake_case__ )
for ref, pred in zip(snake_case__ , snake_case__ )
]
else:
SCREAMING_SNAKE_CASE = [
meteor_score.single_meteor_score(snake_case__ , snake_case__ , alpha=snake_case__ , beta=snake_case__ , gamma=snake_case__ )
for ref, pred in zip(snake_case__ , snake_case__ )
]
return {"meteor": np.mean(snake_case__ )}
| 673 | 1 |
import os
from collections import deque
import torch
from torch.utils.data import Dataset
class UpperCamelCase ( SCREAMING_SNAKE_CASE ):
def __init__( self : Tuple , snake_case__ : Any="" , snake_case__ : Tuple="train" ):
"""simple docstring"""
assert os.path.isdir(snake_case__ )
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = os.listdir(snake_case__ )
for story_filename in story_filenames_list:
if "summary" in story_filename:
continue
SCREAMING_SNAKE_CASE = os.path.join(snake_case__ , snake_case__ )
if not os.path.isfile(snake_case__ ):
continue
self.documents.append(snake_case__ )
def __len__( self : Optional[Any] ):
"""simple docstring"""
return len(self.documents )
def __getitem__( self : int , snake_case__ : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.documents[idx]
SCREAMING_SNAKE_CASE = document_path.split('/' )[-1]
with open(snake_case__ , encoding='utf-8' ) as source:
SCREAMING_SNAKE_CASE = source.read()
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = process_story(snake_case__ )
return document_name, story_lines, summary_lines
def __lowerCAmelCase ( _UpperCamelCase : Optional[Any] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE = list(filter(lambda _UpperCamelCase : len(_UpperCamelCase ) != 0 , [line.strip() for line in raw_story.split('\n' )] ) )
# for some unknown reason some lines miss a period, add it
SCREAMING_SNAKE_CASE = [_add_missing_period(_UpperCamelCase ) for line in nonempty_lines]
# gather article lines
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = deque(_UpperCamelCase )
while True:
try:
SCREAMING_SNAKE_CASE = lines.popleft()
if element.startswith('@highlight' ):
break
story_lines.append(_UpperCamelCase )
except IndexError:
# if "@highlight" is absent from the file we pop
# all elements until there is None, raising an exception.
return story_lines, []
# gather summary lines
SCREAMING_SNAKE_CASE = list(filter(lambda _UpperCamelCase : not t.startswith('@highlight' ) , _UpperCamelCase ) )
return story_lines, summary_lines
def _add_missing_period(line: str) -> str:
    '''Append a period to a line that lacks terminal punctuation.'''
    END_TOKENS = ['.', '!', '?', '...', '\'', '`', '"', '\u2019', '\u201d', ')']
    if line.startswith('@highlight' ):
        return line
    if line[-1] in END_TOKENS:
        return line
    return line + "."
def __lowerCAmelCase ( _UpperCamelCase : str , _UpperCamelCase : List[str] , _UpperCamelCase : str ) -> Tuple:
'''simple docstring'''
if len(_UpperCamelCase ) > block_size:
return sequence[:block_size]
else:
sequence.extend([pad_token_id] * (block_size - len(_UpperCamelCase )) )
return sequence
def __lowerCAmelCase ( _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Dict ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE = torch.ones_like(_UpperCamelCase )
SCREAMING_SNAKE_CASE = sequence == pad_token_id
SCREAMING_SNAKE_CASE = 0
return mask
def __lowerCAmelCase ( _UpperCamelCase : Optional[int] , _UpperCamelCase : int , _UpperCamelCase : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = [tokenizer.encode(_UpperCamelCase ) for line in story_lines]
SCREAMING_SNAKE_CASE = [token for sentence in story_lines_token_ids for token in sentence]
SCREAMING_SNAKE_CASE = [tokenizer.encode(_UpperCamelCase ) for line in summary_lines]
SCREAMING_SNAKE_CASE = [token for sentence in summary_lines_token_ids for token in sentence]
return story_token_ids, summary_token_ids
def __lowerCAmelCase ( _UpperCamelCase : List[Any] , _UpperCamelCase : str ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE = []
for sequence in batch:
SCREAMING_SNAKE_CASE = -1
SCREAMING_SNAKE_CASE = []
for s in sequence:
if s == separator_token_id:
sentence_num += 1
embeddings.append(sentence_num % 2 )
batch_embeddings.append(_UpperCamelCase )
return torch.tensor(_UpperCamelCase )
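# For reference, a tiny example of the "story" format `process_story` above expects:
# article sentences first, then summary lines each preceded by "@highlight"; the text
# is invented for illustration.
#
#   The first article sentence.
#   The second article sentence
#   @highlight
#   First summary line
#
# Parsing this yields (["The first article sentence.", "The second article sentence."],
# ["First summary line."]) -- note the period added to each line that was missing one.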
| 673 |
import numpy as np


def sigmoid(vector: np.ndarray) -> np.ndarray:
    '''Apply the logistic sigmoid element-wise.'''
    return 1 / (1 + np.exp(-vector))


def sigmoid_linear_unit(vector: np.ndarray) -> np.ndarray:
    '''SiLU / swish activation: x * sigmoid(x), applied element-wise.

    >>> sigmoid_linear_unit(np.array([-1.0, 1.0, 2.0]))
    array([-0.26894142,  0.73105858,  1.76159416])
    '''
    return vector * sigmoid(vector)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 673 | 1 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class UpperCamelCase ( unittest.TestCase ):
def __init__( self : Union[str, Any] , snake_case__ : Optional[Any] , snake_case__ : Tuple=7 , snake_case__ : List[Any]=3 , snake_case__ : str=1_8 , snake_case__ : Dict=3_0 , snake_case__ : Tuple=4_0_0 , snake_case__ : Optional[Any]=True , snake_case__ : List[str]=None , snake_case__ : int=True , snake_case__ : List[Any]=None , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = size if size is not None else {'shortest_edge': 2_0}
SCREAMING_SNAKE_CASE = crop_size if crop_size is not None else {'height': 1_8, 'width': 1_8}
SCREAMING_SNAKE_CASE = parent
SCREAMING_SNAKE_CASE = batch_size
SCREAMING_SNAKE_CASE = num_channels
SCREAMING_SNAKE_CASE = image_size
SCREAMING_SNAKE_CASE = min_resolution
SCREAMING_SNAKE_CASE = max_resolution
SCREAMING_SNAKE_CASE = do_resize
SCREAMING_SNAKE_CASE = size
SCREAMING_SNAKE_CASE = do_center_crop
SCREAMING_SNAKE_CASE = crop_size
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class UpperCamelCase ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
__UpperCamelCase =MobileNetVaImageProcessor if is_vision_available() else None
def UpperCamelCase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = MobileNetVaImageProcessingTester(self )
@property
def UpperCamelCase ( self : Any ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(snake_case__ , 'do_resize' ) )
self.assertTrue(hasattr(snake_case__ , 'size' ) )
self.assertTrue(hasattr(snake_case__ , 'do_center_crop' ) )
self.assertTrue(hasattr(snake_case__ , 'crop_size' ) )
def UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 2_0} )
self.assertEqual(image_processor.crop_size , {'height': 1_8, 'width': 1_8} )
SCREAMING_SNAKE_CASE = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 , crop_size=8_4 )
self.assertEqual(image_processor.size , {'shortest_edge': 4_2} )
self.assertEqual(image_processor.crop_size , {'height': 8_4, 'width': 8_4} )
def UpperCamelCase ( self : Tuple ):
"""simple docstring"""
pass
def UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ , Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
SCREAMING_SNAKE_CASE = image_processing(snake_case__ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def UpperCamelCase ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ , numpify=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ , np.ndarray )
# Test not batched input
SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
SCREAMING_SNAKE_CASE = image_processing(snake_case__ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ , torchify=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ , torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
SCREAMING_SNAKE_CASE = image_processing(snake_case__ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
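# A standalone sketch of this processor as exported upstream
# (`MobileNetV1ImageProcessor`); the sizes mirror the tester defaults above:
#
#   import numpy as np
#   from transformers import MobileNetV1ImageProcessor
#
#   processor = MobileNetV1ImageProcessor(size={"shortest_edge": 20}, crop_size={"height": 18, "width": 18})
#   image = np.random.randint(0, 256, (32, 32, 3), dtype=np.uint8)
#   print(processor(image, return_tensors="pt").pixel_values.shape)  # torch.Size([1, 3, 18, 18])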
| 673 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
a_ : Any = logging.get_logger(__name__)
a_ : Dict = {
"Visual-Attention-Network/van-base": (
"https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json"
),
}
class UpperCamelCase ( SCREAMING_SNAKE_CASE ):
__UpperCamelCase ="van"
def __init__( self : Optional[Any] , snake_case__ : Tuple=2_2_4 , snake_case__ : Dict=3 , snake_case__ : Union[str, Any]=[7, 3, 3, 3] , snake_case__ : str=[4, 2, 2, 2] , snake_case__ : Optional[Any]=[6_4, 1_2_8, 3_2_0, 5_1_2] , snake_case__ : Optional[Any]=[3, 3, 1_2, 3] , snake_case__ : Tuple=[8, 8, 4, 4] , snake_case__ : Any="gelu" , snake_case__ : Dict=0.02 , snake_case__ : List[str]=1E-6 , snake_case__ : int=1E-2 , snake_case__ : Any=0.0 , snake_case__ : Tuple=0.0 , **snake_case__ : Any , ):
"""simple docstring"""
super().__init__(**snake_case__ )
SCREAMING_SNAKE_CASE = image_size
SCREAMING_SNAKE_CASE = num_channels
SCREAMING_SNAKE_CASE = patch_sizes
SCREAMING_SNAKE_CASE = strides
SCREAMING_SNAKE_CASE = hidden_sizes
SCREAMING_SNAKE_CASE = depths
SCREAMING_SNAKE_CASE = mlp_ratios
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = layer_norm_eps
SCREAMING_SNAKE_CASE = layer_scale_init_value
SCREAMING_SNAKE_CASE = drop_path_rate
SCREAMING_SNAKE_CASE = dropout_rate
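# A quick sketch of instantiating this configuration as exported upstream
# (`VanConfig`); the override values are illustrative:
#
#   from transformers import VanConfig
#
#   config = VanConfig(hidden_sizes=[32, 64, 160, 256], depths=[2, 2, 4, 2])
#   print(config.hidden_sizes, config.depths)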
| 673 | 1 |
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, TransformeraDModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class UpperCamelCase ( SCREAMING_SNAKE_CASE ):
def __init__( self : Union[str, Any] , snake_case__ : TransformeraDModel , snake_case__ : AutoencoderKL , snake_case__ : KarrasDiffusionSchedulers , snake_case__ : Optional[Dict[int, str]] = None , ):
"""simple docstring"""
super().__init__()
self.register_modules(transformer=snake_case__ , vae=snake_case__ , scheduler=snake_case__ )
# create a imagenet -> id dictionary for easier use
SCREAMING_SNAKE_CASE = {}
if idalabel is not None:
for key, value in idalabel.items():
for label in value.split(',' ):
SCREAMING_SNAKE_CASE = int(snake_case__ )
SCREAMING_SNAKE_CASE = dict(sorted(self.labels.items() ) )
def UpperCamelCase ( self : Optional[int] , snake_case__ : Union[str, List[str]] ):
"""simple docstring"""
if not isinstance(snake_case__ , snake_case__ ):
SCREAMING_SNAKE_CASE = list(snake_case__ )
for l in label:
if l not in self.labels:
raise ValueError(
F"""{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}.""" )
return [self.labels[l] for l in label]
@torch.no_grad()
def __call__( self : List[str] , snake_case__ : List[int] , snake_case__ : float = 4.0 , snake_case__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , snake_case__ : int = 5_0 , snake_case__ : Optional[str] = "pil" , snake_case__ : bool = True , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = len(snake_case__ )
SCREAMING_SNAKE_CASE = self.transformer.config.sample_size
SCREAMING_SNAKE_CASE = self.transformer.config.in_channels
SCREAMING_SNAKE_CASE = randn_tensor(
shape=(batch_size, latent_channels, latent_size, latent_size) , generator=snake_case__ , device=self.device , dtype=self.transformer.dtype , )
SCREAMING_SNAKE_CASE = torch.cat([latents] * 2 ) if guidance_scale > 1 else latents
SCREAMING_SNAKE_CASE = torch.tensor(snake_case__ , device=self.device ).reshape(-1 )
SCREAMING_SNAKE_CASE = torch.tensor([1_0_0_0] * batch_size , device=self.device )
SCREAMING_SNAKE_CASE = torch.cat([class_labels, class_null] , 0 ) if guidance_scale > 1 else class_labels
# set step values
self.scheduler.set_timesteps(snake_case__ )
for t in self.progress_bar(self.scheduler.timesteps ):
if guidance_scale > 1:
SCREAMING_SNAKE_CASE = latent_model_input[: len(snake_case__ ) // 2]
SCREAMING_SNAKE_CASE = torch.cat([half, half] , dim=0 )
SCREAMING_SNAKE_CASE = self.scheduler.scale_model_input(snake_case__ , snake_case__ )
SCREAMING_SNAKE_CASE = t
if not torch.is_tensor(snake_case__ ):
# TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
# This would be a good case for the `match` statement (Python 3.10+)
SCREAMING_SNAKE_CASE = latent_model_input.device.type == 'mps'
if isinstance(snake_case__ , snake_case__ ):
SCREAMING_SNAKE_CASE = torch.floataa if is_mps else torch.floataa
else:
SCREAMING_SNAKE_CASE = torch.intaa if is_mps else torch.intaa
SCREAMING_SNAKE_CASE = torch.tensor([timesteps] , dtype=snake_case__ , device=latent_model_input.device )
elif len(timesteps.shape ) == 0:
SCREAMING_SNAKE_CASE = timesteps[None].to(latent_model_input.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
SCREAMING_SNAKE_CASE = timesteps.expand(latent_model_input.shape[0] )
# predict noise model_output
SCREAMING_SNAKE_CASE = self.transformer(
snake_case__ , timestep=snake_case__ , class_labels=snake_case__ ).sample
# perform guidance
if guidance_scale > 1:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = torch.split(snake_case__ , len(snake_case__ ) // 2 , dim=0 )
SCREAMING_SNAKE_CASE = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
SCREAMING_SNAKE_CASE = torch.cat([half_eps, half_eps] , dim=0 )
SCREAMING_SNAKE_CASE = torch.cat([eps, rest] , dim=1 )
# learned sigma
if self.transformer.config.out_channels // 2 == latent_channels:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = torch.split(snake_case__ , snake_case__ , dim=1 )
else:
SCREAMING_SNAKE_CASE = noise_pred
# compute previous image: x_t -> x_t-1
SCREAMING_SNAKE_CASE = self.scheduler.step(snake_case__ , snake_case__ , snake_case__ ).prev_sample
if guidance_scale > 1:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = latent_model_input.chunk(2 , dim=0 )
else:
SCREAMING_SNAKE_CASE = latent_model_input
SCREAMING_SNAKE_CASE = 1 / self.vae.config.scaling_factor * latents
SCREAMING_SNAKE_CASE = self.vae.decode(snake_case__ ).sample
SCREAMING_SNAKE_CASE = (samples / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
SCREAMING_SNAKE_CASE = samples.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
SCREAMING_SNAKE_CASE = self.numpy_to_pil(snake_case__ )
if not return_dict:
return (samples,)
return ImagePipelineOutput(images=snake_case__ )
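# A standalone usage sketch mirroring the upstream `DiTPipeline` docs; the hub id
# "facebook/DiT-XL-2-256" is a real checkpoint but is an assumption in this context,
# and sampling is slow without a GPU.
if __name__ == "__main__":
    from diffusers import DiTPipeline

    pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256")
    class_ids = pipe.get_label_ids(["white shark"])  # map label names to ImageNet class ids
    generator = torch.manual_seed(0)
    image = pipe(class_labels=class_ids, num_inference_steps=25, generator=generator).images[0]
    image.save("white_shark.png")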
| 673 |
def create_ngram(sentence: str, ngram_size: int) -> list[str]:
    '''Create all character n-grams of length `ngram_size` from `sentence`.

    >>> create_ngram("I am a sentence", 2)
    ['I ', ' a', 'am', 'm ', ' a', 'a ', ' s', 'se', 'en', 'nt', 'te', 'en', 'nc', 'ce']
    '''
    return [sentence[i : i + ngram_size] for i in range(len(sentence) - ngram_size + 1)]


if __name__ == "__main__":
    from doctest import testmod

    testmod()
| 673 | 1 |
from ....utils import logging
a_ : Union[str, Any] = logging.get_logger(__name__)
class UpperCamelCase ( SCREAMING_SNAKE_CASE ):
def __init__( self : str , snake_case__ : List[Any] , snake_case__ : Tuple=None , snake_case__ : Optional[Any]=2_0_4_8 ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = config.__dict__
SCREAMING_SNAKE_CASE = modal_hidden_size
if num_labels:
SCREAMING_SNAKE_CASE = num_labels
| 673 |
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
a_ : Dict = logging.get_logger(__name__)
@add_end_docstrings(SCREAMING_SNAKE_CASE )
class UpperCamelCase ( SCREAMING_SNAKE_CASE ):
def __init__( self : Any , **snake_case__ : Optional[int] ):
"""simple docstring"""
super().__init__(**snake_case__ )
if self.framework == "tf":
raise ValueError(F"""The {self.__class__} is only available in PyTorch.""" )
requires_backends(self , 'vision' )
self.check_model_type(snake_case__ )
def __call__( self : List[Any] , snake_case__ : Union[str, "Image.Image", List[Dict[str, Any]]] , snake_case__ : Union[str, List[str]] = None , **snake_case__ : Union[str, Any] , ):
"""simple docstring"""
if "text_queries" in kwargs:
SCREAMING_SNAKE_CASE = kwargs.pop('text_queries' )
if isinstance(snake_case__ , (str, Image.Image) ):
SCREAMING_SNAKE_CASE = {'image': image, 'candidate_labels': candidate_labels}
else:
SCREAMING_SNAKE_CASE = image
SCREAMING_SNAKE_CASE = super().__call__(snake_case__ , **snake_case__ )
return results
def UpperCamelCase ( self : Union[str, Any] , **snake_case__ : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = {}
if "threshold" in kwargs:
SCREAMING_SNAKE_CASE = kwargs['threshold']
if "top_k" in kwargs:
SCREAMING_SNAKE_CASE = kwargs['top_k']
return {}, {}, postprocess_params
def UpperCamelCase ( self : List[Any] , snake_case__ : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = load_image(inputs['image'] )
SCREAMING_SNAKE_CASE = inputs['candidate_labels']
if isinstance(snake_case__ , snake_case__ ):
SCREAMING_SNAKE_CASE = candidate_labels.split(',' )
SCREAMING_SNAKE_CASE = torch.tensor([[image.height, image.width]] , dtype=torch.intaa )
for i, candidate_label in enumerate(snake_case__ ):
SCREAMING_SNAKE_CASE = self.tokenizer(snake_case__ , return_tensors=self.framework )
SCREAMING_SNAKE_CASE = self.image_processor(snake_case__ , return_tensors=self.framework )
yield {
"is_last": i == len(snake_case__ ) - 1,
"target_size": target_size,
"candidate_label": candidate_label,
**text_inputs,
**image_features,
}
def UpperCamelCase ( self : Any , snake_case__ : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = model_inputs.pop('target_size' )
SCREAMING_SNAKE_CASE = model_inputs.pop('candidate_label' )
SCREAMING_SNAKE_CASE = model_inputs.pop('is_last' )
SCREAMING_SNAKE_CASE = self.model(**snake_case__ )
SCREAMING_SNAKE_CASE = {'target_size': target_size, 'candidate_label': candidate_label, 'is_last': is_last, **outputs}
return model_outputs
    def UpperCamelCase ( self : Union[str, Any] , model_outputs : Tuple , threshold : float = 0.1 , top_k : Union[str, Any] = None ):
        """simple docstring"""
        results = []
        for model_output in model_outputs:
            label = model_output['candidate_label']
            model_output = BaseModelOutput(model_output )
            outputs = self.image_processor.post_process_object_detection(
                outputs=model_output , threshold=threshold , target_sizes=model_output['target_size'] )[0]
            for index in outputs["scores"].nonzero():
                score = outputs['scores'][index].item()
                box = self._get_bounding_box(outputs['boxes'][index][0] )
                result = {'score': score, 'label': label, 'box': box}
                results.append(result )
        results = sorted(results , key=lambda x : x['score'] , reverse=True )
        if top_k:
            results = results[:top_k]
        return results
def UpperCamelCase ( self : List[Any] , snake_case__ : "torch.Tensor" ):
"""simple docstring"""
if self.framework != "pt":
raise ValueError('The ZeroShotObjectDetectionPipeline is only available in PyTorch.' )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = box.int().tolist()
SCREAMING_SNAKE_CASE = {
'xmin': xmin,
'ymin': ymin,
'xmax': xmax,
'ymax': ymax,
}
return bbox
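# A standalone usage sketch via the high-level `pipeline` factory; the checkpoint
# "google/owlvit-base-patch32" is the usual zero-shot detector but is an assumption here.
if __name__ == "__main__":
    from transformers import pipeline

    detector = pipeline("zero-shot-object-detection", model="google/owlvit-base-patch32")
    predictions = detector(
        "http://images.cocodataset.org/val2017/000000039769.jpg",
        candidate_labels=["cat", "remote control"],
    )
    for pred in predictions:
        print(pred["label"], round(pred["score"], 3), pred["box"])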
| 673 | 1 |
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
a_ : Optional[Any] = logging.get_logger(__name__)
a_ : str = {
"linear": get_linear_schedule_with_warmup,
"cosine": get_cosine_schedule_with_warmup,
"cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
"polynomial": get_polynomial_decay_schedule_with_warmup,
"constant": get_constant_schedule,
"constant_w_warmup": get_constant_schedule_with_warmup,
}
class UpperCamelCase ( SCREAMING_SNAKE_CASE ):
def __init__( self : Optional[Any] , snake_case__ : Union[str, Any]=None , snake_case__ : str=None , *snake_case__ : Optional[Any] , **snake_case__ : Dict ):
"""simple docstring"""
super().__init__(*snake_case__ , **snake_case__ )
if config is None:
assert isinstance(self.model , snake_case__ ), (
"If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
F""" {self.model.__class__}"""
)
SCREAMING_SNAKE_CASE = self.model.config
else:
SCREAMING_SNAKE_CASE = config
SCREAMING_SNAKE_CASE = data_args
SCREAMING_SNAKE_CASE = self.config.tgt_vocab_size if isinstance(self.config , snake_case__ ) else self.config.vocab_size
if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
assert self.config.pad_token_id is not None, (
"Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss"
" calculation or doing label smoothing."
)
if self.config.pad_token_id is None and self.config.eos_token_id is not None:
logger.warning(
F"""The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for"""
' padding..' )
if self.args.label_smoothing == 0:
SCREAMING_SNAKE_CASE = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id )
else:
# dynamically import label_smoothed_nll_loss
from utils import label_smoothed_nll_loss
SCREAMING_SNAKE_CASE = label_smoothed_nll_loss
def UpperCamelCase ( self : str , snake_case__ : int ):
"""simple docstring"""
if self.optimizer is None:
SCREAMING_SNAKE_CASE = ['bias', 'LayerNorm.weight']
SCREAMING_SNAKE_CASE = [
{
'params': [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )],
'weight_decay': self.args.weight_decay,
},
{
'params': [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )],
'weight_decay': 0.0,
},
]
SCREAMING_SNAKE_CASE = Adafactor if self.args.adafactor else AdamW
if self.args.adafactor:
SCREAMING_SNAKE_CASE = Adafactor
SCREAMING_SNAKE_CASE = {'scale_parameter': False, 'relative_step': False}
else:
SCREAMING_SNAKE_CASE = AdamW
SCREAMING_SNAKE_CASE = {
                'betas': (self.args.adam_beta1, self.args.adam_beta2),
'eps': self.args.adam_epsilon,
}
SCREAMING_SNAKE_CASE = self.args.learning_rate
if self.sharded_ddp:
SCREAMING_SNAKE_CASE = OSS(
params=snake_case__ , optim=snake_case__ , **snake_case__ , )
else:
SCREAMING_SNAKE_CASE = optimizer_cls(snake_case__ , **snake_case__ )
if self.lr_scheduler is None:
SCREAMING_SNAKE_CASE = self._get_lr_scheduler(snake_case__ )
else: # ignoring --lr_scheduler
logger.warning('scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.' )
def UpperCamelCase ( self : List[Any] , snake_case__ : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = arg_to_scheduler[self.args.lr_scheduler]
if self.args.lr_scheduler == "constant":
SCREAMING_SNAKE_CASE = schedule_func(self.optimizer )
elif self.args.lr_scheduler == "constant_w_warmup":
SCREAMING_SNAKE_CASE = schedule_func(self.optimizer , num_warmup_steps=self.args.warmup_steps )
else:
SCREAMING_SNAKE_CASE = schedule_func(
self.optimizer , num_warmup_steps=self.args.warmup_steps , num_training_steps=snake_case__ )
return scheduler
def UpperCamelCase ( self : Any ):
"""simple docstring"""
if isinstance(self.train_dataset , torch.utils.data.IterableDataset ):
return None
elif is_torch_tpu_available():
return get_tpu_sampler(self.train_dataset )
else:
if self.args.sortish_sampler:
self.train_dataset.make_sortish_sampler(
self.args.per_device_train_batch_size , distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) , )
return (
RandomSampler(self.train_dataset )
if self.args.local_rank == -1
else DistributedSampler(self.train_dataset )
)
def UpperCamelCase ( self : List[str] , snake_case__ : Tuple , snake_case__ : Union[str, Any] , snake_case__ : Union[str, Any] ):
"""simple docstring"""
if self.args.label_smoothing == 0:
if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
# force training to ignore pad token
SCREAMING_SNAKE_CASE = model(**snake_case__ , use_cache=snake_case__ )[0]
SCREAMING_SNAKE_CASE = self.loss_fn(logits.view(-1 , logits.shape[-1] ) , labels.view(-1 ) )
else:
# compute usual loss via models
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = model(**snake_case__ , labels=snake_case__ , use_cache=snake_case__ )[:2]
else:
# compute label smoothed loss
SCREAMING_SNAKE_CASE = model(**snake_case__ , use_cache=snake_case__ )[0]
SCREAMING_SNAKE_CASE = torch.nn.functional.log_softmax(snake_case__ , dim=-1 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.loss_fn(snake_case__ , snake_case__ , self.args.label_smoothing , ignore_index=self.config.pad_token_id )
return loss, logits
def UpperCamelCase ( self : Optional[Any] , snake_case__ : Dict , snake_case__ : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = inputs.pop('labels' )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self._compute_loss(snake_case__ , snake_case__ , snake_case__ )
return loss
def UpperCamelCase ( self : Any , snake_case__ : nn.Module , snake_case__ : Dict[str, Union[torch.Tensor, Any]] , snake_case__ : bool , snake_case__ : Optional[List[str]] = None , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self._prepare_inputs(snake_case__ )
SCREAMING_SNAKE_CASE = {
'max_length': self.data_args.val_max_target_length
if self.data_args is not None
else self.config.max_length,
'num_beams': self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
}
if self.args.predict_with_generate and not self.args.prediction_loss_only:
SCREAMING_SNAKE_CASE = self.model.generate(
inputs['input_ids'] , attention_mask=inputs['attention_mask'] , **snake_case__ , )
# in case the batch is shorter than max length, the output should be padded
if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
SCREAMING_SNAKE_CASE = self._pad_tensors_to_max_len(snake_case__ , gen_kwargs['max_length'] )
SCREAMING_SNAKE_CASE = inputs.pop('labels' )
with torch.no_grad():
# compute loss on predict data
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self._compute_loss(snake_case__ , snake_case__ , snake_case__ )
SCREAMING_SNAKE_CASE = loss.mean().detach()
if self.args.prediction_loss_only:
return (loss, None, None)
SCREAMING_SNAKE_CASE = generated_tokens if self.args.predict_with_generate else logits
if labels.shape[-1] < gen_kwargs["max_length"]:
SCREAMING_SNAKE_CASE = self._pad_tensors_to_max_len(snake_case__ , gen_kwargs['max_length'] )
return (loss, logits, labels)
def UpperCamelCase ( self : List[Any] , snake_case__ : Optional[int] , snake_case__ : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
if pad_token_id is None:
raise ValueError(
'Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be'
F""" padded to `max_length`={max_length}""" )
SCREAMING_SNAKE_CASE = pad_token_id * torch.ones(
(tensor.shape[0], max_length) , dtype=tensor.dtype , device=tensor.device )
SCREAMING_SNAKE_CASE = tensor
return padded_tensor
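# A tiny standalone illustration of what the padding helper above (named
# `_pad_tensors_to_max_len` upstream) does; `pad_token_id=0` is an illustrative assumption.
if __name__ == "__main__":

    def _pad_demo(tensor: torch.Tensor, max_length: int, pad_token_id: int = 0) -> torch.Tensor:
        padded = pad_token_id * torch.ones((tensor.shape[0], max_length), dtype=tensor.dtype)
        padded[:, : tensor.shape[-1]] = tensor
        return padded

    print(_pad_demo(torch.tensor([[5, 6, 7]]), 6))  # tensor([[5, 6, 7, 0, 0, 0]])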
| 673 |
def solution(power: int = 1000) -> int:
    '''Return the sum of the decimal digits of 2**power.'''
    num = 2**power
    string_num = str(num)
    list_num = list(string_num)
    sum_of_num = 0
    for i in list_num:
        sum_of_num += int(i)
    return sum_of_num


if __name__ == "__main__":
    power = int(input("Enter the power of 2: ").strip())
    print("2 ^ ", power, " = ", 2**power)
    result = solution(power)
    print("Sum of the digits is: ", result)
| 673 | 1 |
from __future__ import annotations


def merge(input_list: list, low: int, mid: int, high: int) -> list:
    '''Sort `input_list[low : high + 1]` in place by merging its two sorted halves.'''
    result = []
    left, right = input_list[low:mid], input_list[mid : high + 1]
    while left and right:
        result.append((left if left[0] <= right[0] else right).pop(0))
    input_list[low : high + 1] = result + left + right
    return input_list


def iter_merge_sort(input_list: list) -> list:
    '''Bottom-up (iterative) merge sort.

    >>> iter_merge_sort([5, 9, 8, 7, 1, 2, 7])
    [1, 2, 5, 7, 7, 8, 9]
    '''
    if len(input_list) <= 1:
        return input_list
    input_list = list(input_list)
    # iteration for two-way merging
    p = 2
    while p <= len(input_list):
        # getting low, high and middle value for merge-sort of single list
        for i in range(0, len(input_list), p):
            low = i
            high = i + p - 1
            mid = (low + high + 1) // 2
            input_list = merge(input_list, low, mid, high)
        # final merge of last two parts
        if p * 2 >= len(input_list):
            mid = i
            input_list = merge(input_list, 0, mid, len(input_list) - 1)
            break
        p *= 2
    return input_list


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    if user_input == "":
        unsorted = []
    else:
        unsorted = [int(item.strip()) for item in user_input.split(",")]
    print(iter_merge_sort(unsorted))
| 673 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class UpperCamelCase ( SCREAMING_SNAKE_CASE ):
__UpperCamelCase ="facebook/bart-large-mnli"
__UpperCamelCase =(
"This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
"should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
"It returns the most likely label in the list of provided `labels` for the input text."
)
__UpperCamelCase ="text_classifier"
__UpperCamelCase =AutoTokenizer
__UpperCamelCase =AutoModelForSequenceClassification
__UpperCamelCase =["text", ["text"]]
__UpperCamelCase =["text"]
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
super().setup()
SCREAMING_SNAKE_CASE = self.model.config
SCREAMING_SNAKE_CASE = -1
for idx, label in config.idalabel.items():
if label.lower().startswith('entail' ):
SCREAMING_SNAKE_CASE = int(snake_case__ )
if self.entailment_id == -1:
raise ValueError('Could not determine the entailment ID from the model config, please pass it at init.' )
def UpperCamelCase ( self : Optional[Any] , snake_case__ : List[str] , snake_case__ : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = labels
return self.pre_processor(
[text] * len(snake_case__ ) , [F"""This example is {label}""" for label in labels] , return_tensors='pt' , padding='max_length' , )
def UpperCamelCase ( self : Dict , snake_case__ : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = outputs.logits
SCREAMING_SNAKE_CASE = torch.argmax(logits[:, 2] ).item()
return self._labels[label_id]
| 673 | 1 |
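# The tool above scores each candidate label with an NLI model and keeps the most
# entailed one. The transformers zero-shot pipeline wraps the same idea; a minimal
# usage sketch (the model downloads on first run):
from transformers import pipeline

classifier = pipeline("zero-shot-classification", model="facebook/bart-large-mnli")
result = classifier(
    "The new GPU doubles training throughput.",
    candidate_labels=["technology", "sports", "cooking"],
)
print(result["labels"][0])  # most likely label, here expected to be "technology"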
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
a_ : Optional[Any] = logging.get_logger(__name__)
a_ : Optional[Any] = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
a_ : Any = {
"vocab_file": {
"allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json"
},
"merges_file": {
"allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt"
},
}
a_ : Union[str, Any] = {"allegro/herbert-base-cased": 514}
a_ : List[Any] = {}
class UpperCamelCase ( SCREAMING_SNAKE_CASE ):
__UpperCamelCase =VOCAB_FILES_NAMES
__UpperCamelCase =PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase =PRETRAINED_INIT_CONFIGURATION
__UpperCamelCase =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase =HerbertTokenizer
def __init__( self : Tuple , snake_case__ : Optional[Any]=None , snake_case__ : int=None , snake_case__ : Optional[int]=None , snake_case__ : str="<s>" , snake_case__ : Tuple="<unk>" , snake_case__ : List[str]="<pad>" , snake_case__ : Tuple="<mask>" , snake_case__ : Dict="</s>" , **snake_case__ : List[str] , ):
"""simple docstring"""
super().__init__(
snake_case__ , snake_case__ , tokenizer_file=snake_case__ , cls_token=snake_case__ , unk_token=snake_case__ , pad_token=snake_case__ , mask_token=snake_case__ , sep_token=snake_case__ , **snake_case__ , )
def UpperCamelCase ( self : Union[str, Any] , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = [self.cls_token_id]
SCREAMING_SNAKE_CASE = [self.sep_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def UpperCamelCase ( self : Optional[Any] , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None , snake_case__ : bool = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=snake_case__ , token_ids_a=snake_case__ , already_has_special_tokens=snake_case__ )
if token_ids_a is None:
return [1] + ([0] * len(snake_case__ )) + [1]
return [1] + ([0] * len(snake_case__ )) + [1] + ([0] * len(snake_case__ )) + [1]
def UpperCamelCase ( self : Optional[Any] , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = [self.sep_token_id]
SCREAMING_SNAKE_CASE = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCamelCase ( self : Tuple , snake_case__ : str , snake_case__ : Optional[str] = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self._tokenizer.model.save(snake_case__ , name=snake_case__ )
return tuple(snake_case__ )
| 673 |
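# The tokenizer above frames inputs as <s> A </s> for single sequences and
# <s> A </s> B </s> for pairs. A pure-Python sketch of that layout; the token IDs
# 0/2 for <s>/</s> are illustrative assumptions, not taken from the real vocab.
def build_inputs_with_special_tokens(ids_a, ids_b=None, cls_id=0, sep_id=2):
    out = [cls_id] + ids_a + [sep_id]
    if ids_b is not None:
        out += ids_b + [sep_id]
    return out

assert build_inputs_with_special_tokens([10, 11]) == [0, 10, 11, 2]
assert build_inputs_with_special_tokens([10], [20, 21]) == [0, 10, 2, 20, 21, 2]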
# Note: if you intend to run this script make sure you look under scripts/fsmt/
# to locate the appropriate script to do the work correctly. There is a set of scripts to:
# - download and prepare data and run the conversion script
# - perform eval to get the best hparam into the config
# - generate model_cards - useful if you have multiple models from the same paper
import argparse
import json
import os
import re
from collections import OrderedDict
from os.path import basename, dirname
import fairseq
import torch
from fairseq import hub_utils
from fairseq.data.dictionary import Dictionary
from transformers import FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
a_ : str = 2
# based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping`
# values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults:
#
# * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users)
# * `early_stopping`: `False` consistently scored better
# * `length_penalty` varied, so will assign the best one depending on the model
a_ : int = {
# fairseq:
"wmt19-ru-en": {"length_penalty": 1.1},
"wmt19-en-ru": {"length_penalty": 1.1_5},
"wmt19-en-de": {"length_penalty": 1.0},
"wmt19-de-en": {"length_penalty": 1.1},
# allenai:
"wmt16-en-de-dist-12-1": {"length_penalty": 0.6},
"wmt16-en-de-dist-6-1": {"length_penalty": 0.6},
"wmt16-en-de-12-1": {"length_penalty": 0.8},
"wmt19-de-en-6-6-base": {"length_penalty": 0.6},
"wmt19-de-en-6-6-big": {"length_penalty": 0.6},
}
# this remaps the different models to their organization names
a_ : str = {}
for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
a_ : List[Any] = "facebook"
for m in [
"wmt16-en-de-dist-12-1",
"wmt16-en-de-dist-6-1",
"wmt16-en-de-12-1",
"wmt19-de-en-6-6-base",
"wmt19-de-en-6-6-big",
]:
a_ : Any = "allenai"
def __lowerCAmelCase ( _UpperCamelCase : List[Any] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE = dict((re.sub(R'@@$' , '' , _UpperCamelCase ), v) if k.endswith('@@' ) else (re.sub(R'$' , '</w>' , _UpperCamelCase ), v) for k, v in d.items() )
SCREAMING_SNAKE_CASE = '<s> <pad> </s> <unk>'.split()
# restore the special tokens
for k in keep_keys:
del da[f"""{k}</w>"""]
SCREAMING_SNAKE_CASE = d[k] # restore
return da
def __lowerCAmelCase ( _UpperCamelCase : Dict , _UpperCamelCase : List[str] ) -> Dict:
'''simple docstring'''
assert os.path.exists(_UpperCamelCase )
os.makedirs(_UpperCamelCase , exist_ok=_UpperCamelCase )
print(f"""Writing results to {pytorch_dump_folder_path}""" )
# handle various types of models
SCREAMING_SNAKE_CASE = basename(_UpperCamelCase )
SCREAMING_SNAKE_CASE = dirname(_UpperCamelCase )
SCREAMING_SNAKE_CASE = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel
SCREAMING_SNAKE_CASE = cls.hub_models()
SCREAMING_SNAKE_CASE = {'bpe': 'fastbpe', 'tokenizer': 'moses'}
SCREAMING_SNAKE_CASE = '.'
# note: this model dump predates a later fairseq upgrade that rewrites and splits
# the saved weights on load, so we can't use torch.load() directly on the model
# file.
# see: upgrade_state_dict(state_dict) in fairseq_model.py
print(f"""using checkpoint {checkpoint_file}""" )
SCREAMING_SNAKE_CASE = hub_utils.from_pretrained(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , archive_map=_UpperCamelCase , **_UpperCamelCase )
SCREAMING_SNAKE_CASE = vars(chkpt['args']['model'] )
SCREAMING_SNAKE_CASE = args['source_lang']
SCREAMING_SNAKE_CASE = args['target_lang']
SCREAMING_SNAKE_CASE = dirname(_UpperCamelCase )
SCREAMING_SNAKE_CASE = basename(_UpperCamelCase )
# dicts
SCREAMING_SNAKE_CASE = os.path.join(_UpperCamelCase , f"""dict.{src_lang}.txt""" )
SCREAMING_SNAKE_CASE = os.path.join(_UpperCamelCase , f"""dict.{tgt_lang}.txt""" )
SCREAMING_SNAKE_CASE = Dictionary.load(_UpperCamelCase )
SCREAMING_SNAKE_CASE = rewrite_dict_keys(src_dict.indices )
SCREAMING_SNAKE_CASE = len(_UpperCamelCase )
SCREAMING_SNAKE_CASE = os.path.join(_UpperCamelCase , 'vocab-src.json' )
print(f"""Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records""" )
with open(_UpperCamelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(_UpperCamelCase , ensure_ascii=_UpperCamelCase , indent=_UpperCamelCase ) )
# detect whether this is a do_lower_case situation, which can be derived by checking whether we
# have at least one uppercase letter in the source vocab
SCREAMING_SNAKE_CASE = True
for k in src_vocab.keys():
if not k.islower():
SCREAMING_SNAKE_CASE = False
break
SCREAMING_SNAKE_CASE = Dictionary.load(_UpperCamelCase )
SCREAMING_SNAKE_CASE = rewrite_dict_keys(tgt_dict.indices )
SCREAMING_SNAKE_CASE = len(_UpperCamelCase )
SCREAMING_SNAKE_CASE = os.path.join(_UpperCamelCase , 'vocab-tgt.json' )
print(f"""Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records""" )
with open(_UpperCamelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(_UpperCamelCase , ensure_ascii=_UpperCamelCase , indent=_UpperCamelCase ) )
# merges_file (bpecodes)
SCREAMING_SNAKE_CASE = os.path.join(_UpperCamelCase , VOCAB_FILES_NAMES['merges_file'] )
for fn in ["bpecodes", "code"]: # older fairseq called the merges file "code"
SCREAMING_SNAKE_CASE = os.path.join(_UpperCamelCase , _UpperCamelCase )
if os.path.exists(_UpperCamelCase ):
break
with open(_UpperCamelCase , encoding='utf-8' ) as fin:
SCREAMING_SNAKE_CASE = fin.read()
SCREAMING_SNAKE_CASE = re.sub(R' \d+$' , '' , _UpperCamelCase , 0 , re.M ) # remove frequency number
print(f"""Generating {merges_file}""" )
with open(_UpperCamelCase , 'w' , encoding='utf-8' ) as fout:
fout.write(_UpperCamelCase )
# model config
SCREAMING_SNAKE_CASE = os.path.join(_UpperCamelCase , 'config.json' )
# validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe -
# may have to modify the tokenizer if a different type is used by a future model
assert args["bpe"] == "fastbpe", f"""need to extend tokenizer to support bpe={args['bpe']}"""
assert args["tokenizer"] == "moses", f"""need to extend tokenizer to support bpe={args['tokenizer']}"""
SCREAMING_SNAKE_CASE = {
'architectures': ['FSMTForConditionalGeneration'],
'model_type': 'fsmt',
'activation_dropout': args['activation_dropout'],
'activation_function': 'relu',
'attention_dropout': args['attention_dropout'],
'd_model': args['decoder_embed_dim'],
'dropout': args['dropout'],
'init_std': 0.02,
'max_position_embeddings': args['max_source_positions'],
'num_hidden_layers': args['encoder_layers'],
'src_vocab_size': src_vocab_size,
'tgt_vocab_size': tgt_vocab_size,
'langs': [src_lang, tgt_lang],
'encoder_attention_heads': args['encoder_attention_heads'],
'encoder_ffn_dim': args['encoder_ffn_embed_dim'],
'encoder_layerdrop': args['encoder_layerdrop'],
'encoder_layers': args['encoder_layers'],
'decoder_attention_heads': args['decoder_attention_heads'],
'decoder_ffn_dim': args['decoder_ffn_embed_dim'],
'decoder_layerdrop': args['decoder_layerdrop'],
'decoder_layers': args['decoder_layers'],
'bos_token_id': 0,
'pad_token_id': 1,
'eos_token_id': 2,
'is_encoder_decoder': True,
'scale_embedding': not args['no_scale_embedding'],
'tie_word_embeddings': args['share_all_embeddings'],
}
# good hparam defaults to start with
SCREAMING_SNAKE_CASE = 5
SCREAMING_SNAKE_CASE = False
if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]:
SCREAMING_SNAKE_CASE = best_score_hparams[model_dir]['length_penalty']
else:
SCREAMING_SNAKE_CASE = 1.0
print(f"""Generating {fsmt_model_config_file}""" )
with open(_UpperCamelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(_UpperCamelCase , ensure_ascii=_UpperCamelCase , indent=_UpperCamelCase ) )
# tokenizer config
SCREAMING_SNAKE_CASE = os.path.join(_UpperCamelCase , _UpperCamelCase )
SCREAMING_SNAKE_CASE = {
'langs': [src_lang, tgt_lang],
'model_max_length': 10_24,
'do_lower_case': do_lower_case,
}
print(f"""Generating {fsmt_tokenizer_config_file}""" )
with open(_UpperCamelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(_UpperCamelCase , ensure_ascii=_UpperCamelCase , indent=_UpperCamelCase ) )
# model
SCREAMING_SNAKE_CASE = chkpt['models'][0]
SCREAMING_SNAKE_CASE = model.state_dict()
# rename keys to start with 'model.'
SCREAMING_SNAKE_CASE = OrderedDict(('model.' + k, v) for k, v in model_state_dict.items() )
# remove unneeded keys
SCREAMING_SNAKE_CASE = [
'model.model',
'model.encoder.version',
'model.decoder.version',
'model.encoder_embed_tokens.weight',
'model.decoder_embed_tokens.weight',
'model.encoder.embed_positions._float_tensor',
'model.decoder.embed_positions._float_tensor',
]
for k in ignore_keys:
model_state_dict.pop(_UpperCamelCase , _UpperCamelCase )
SCREAMING_SNAKE_CASE = FSMTConfig.from_pretrained(_UpperCamelCase )
SCREAMING_SNAKE_CASE = FSMTForConditionalGeneration(_UpperCamelCase )
# check that it loads ok
model_new.load_state_dict(_UpperCamelCase , strict=_UpperCamelCase )
# save
SCREAMING_SNAKE_CASE = os.path.join(_UpperCamelCase , _UpperCamelCase )
print(f"""Generating {pytorch_weights_dump_path}""" )
torch.save(_UpperCamelCase , _UpperCamelCase )
print('Conversion is done!' )
print('\nLast step is to upload the files to s3' )
print(f"""cd {data_root}""" )
print(f"""transformers-cli upload {model_dir}""" )
if __name__ == "__main__":
a_ : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--fsmt_checkpoint_path",
default=None,
type=str,
required=True,
help=(
"Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"
" bpecodes, etc."
),
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
a_ : int = parser.parse_args()
convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
| 673 | 1 |
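# The conversion above rewrites fairseq BPE vocab keys: a trailing "@@" marks a
# word-internal piece (the marker is dropped) while every other key is a word end
# and gets a "</w>" suffix; special tokens are then restored. A tiny sketch of the
# same transformation (the sample vocab is made up):
import re

def rewrite_bpe_keys(vocab: dict) -> dict:
    out = {
        (re.sub(r"@@$", "", k) if k.endswith("@@") else k + "</w>"): v
        for k, v in vocab.items()
    }
    for special in "<s> <pad> </s> <unk>".split():  # keep specials unsuffixed
        if special + "</w>" in out:
            out[special] = out.pop(special + "</w>")
    return out

print(rewrite_bpe_keys({"<s>": 0, "low@@": 5, "er": 6}))
# {'low': 5, 'er</w>': 6, '<s>': 0}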
from __future__ import annotations
import time
a_ : str = list[tuple[int, int]]
a_ : Optional[int] = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
a_ : int = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
class UpperCamelCase :
def __init__( self : List[str] , snake_case__ : int , snake_case__ : int , snake_case__ : int , snake_case__ : int , snake_case__ : Node | None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = pos_x
SCREAMING_SNAKE_CASE = pos_y
SCREAMING_SNAKE_CASE = (pos_y, pos_x)
SCREAMING_SNAKE_CASE = goal_x
SCREAMING_SNAKE_CASE = goal_y
SCREAMING_SNAKE_CASE = parent
class UpperCamelCase :
def __init__( self : str , snake_case__ : tuple[int, int] , snake_case__ : tuple[int, int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = Node(start[1] , start[0] , goal[1] , goal[0] , snake_case__ )
SCREAMING_SNAKE_CASE = Node(goal[1] , goal[0] , goal[1] , goal[0] , snake_case__ )
SCREAMING_SNAKE_CASE = [self.start]
SCREAMING_SNAKE_CASE = False
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
while self.node_queue:
SCREAMING_SNAKE_CASE = self.node_queue.pop(0 )
if current_node.pos == self.target.pos:
SCREAMING_SNAKE_CASE = True
return self.retrace_path(snake_case__ )
SCREAMING_SNAKE_CASE = self.get_successors(snake_case__ )
for node in successors:
self.node_queue.append(snake_case__ )
if not self.reached:
return [self.start.pos]
return None
def UpperCamelCase ( self : List[Any] , snake_case__ : Node ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = []
for action in delta:
SCREAMING_SNAKE_CASE = parent.pos_x + action[1]
SCREAMING_SNAKE_CASE = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(snake_case__ ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(snake_case__ , snake_case__ , self.target.pos_y , self.target.pos_x , snake_case__ ) )
return successors
def UpperCamelCase ( self : Optional[Any] , snake_case__ : Node | None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = node
SCREAMING_SNAKE_CASE = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
SCREAMING_SNAKE_CASE = current_node.parent
path.reverse()
return path
class UpperCamelCase :
def __init__( self : Optional[int] , snake_case__ : Optional[Any] , snake_case__ : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = BreadthFirstSearch(snake_case__ , snake_case__ )
SCREAMING_SNAKE_CASE = BreadthFirstSearch(snake_case__ , snake_case__ )
SCREAMING_SNAKE_CASE = False
def UpperCamelCase ( self : int ):
"""simple docstring"""
while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
SCREAMING_SNAKE_CASE = self.fwd_bfs.node_queue.pop(0 )
SCREAMING_SNAKE_CASE = self.bwd_bfs.node_queue.pop(0 )
if current_bwd_node.pos == current_fwd_node.pos:
SCREAMING_SNAKE_CASE = True
return self.retrace_bidirectional_path(
snake_case__ , snake_case__ )
SCREAMING_SNAKE_CASE = current_bwd_node
SCREAMING_SNAKE_CASE = current_fwd_node
SCREAMING_SNAKE_CASE = {
self.fwd_bfs: self.fwd_bfs.get_successors(snake_case__ ),
self.bwd_bfs: self.bwd_bfs.get_successors(snake_case__ ),
}
for bfs in [self.fwd_bfs, self.bwd_bfs]:
for node in successors[bfs]:
bfs.node_queue.append(snake_case__ )
if not self.reached:
return [self.fwd_bfs.start.pos]
return None
def UpperCamelCase ( self : Optional[Any] , snake_case__ : Node , snake_case__ : Node ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.fwd_bfs.retrace_path(snake_case__ )
SCREAMING_SNAKE_CASE = self.bwd_bfs.retrace_path(snake_case__ )
bwd_path.pop()
bwd_path.reverse()
SCREAMING_SNAKE_CASE = fwd_path + bwd_path
return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
import doctest
doctest.testmod()
a_ : Tuple = (0, 0)
a_ : Any = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
a_ : str = time.time()
a_ : Optional[int] = BreadthFirstSearch(init, goal)
a_ : List[str] = bfs.search()
a_ : Tuple = time.time() - start_bfs_time
print("Unidirectional BFS computation time : ", bfs_time)
a_ : Optional[int] = time.time()
a_ : Optional[Any] = BidirectionalBreadthFirstSearch(init, goal)
a_ : Optional[Any] = bd_bfs.search()
a_ : int = time.time() - start_bd_bfs_time
print("Bidirectional BFS computation time : ", bd_bfs_time)
| 673 |
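# For contrast with the classes above, a minimal single-direction grid BFS sketch
# using the same 4-neighbour moves, returning the shortest path length (or -1).
# Names and the demo grid are illustrative.
from collections import deque

def bfs_shortest_path_length(grid, start, goal):
    rows, cols = len(grid), len(grid[0])
    queue = deque([(start, 0)])
    seen = {start}
    while queue:
        (y, x), dist = queue.popleft()
        if (y, x) == goal:
            return dist
        for dy, dx in ((-1, 0), (0, -1), (1, 0), (0, 1)):  # up, left, down, right
            ny, nx = y + dy, x + dx
            if 0 <= ny < rows and 0 <= nx < cols and grid[ny][nx] == 0 and (ny, nx) not in seen:
                seen.add((ny, nx))
                queue.append(((ny, nx), dist + 1))
    return -1  # goal unreachable

demo = [[0, 0, 0], [1, 1, 0], [0, 0, 0]]
assert bfs_shortest_path_length(demo, (0, 0), (2, 0)) == 6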
import random
def __lowerCAmelCase ( _UpperCamelCase : int , _UpperCamelCase : float , _UpperCamelCase : bool = False ) -> dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE = {i: [] for i in range(_UpperCamelCase )}
# if the probability is greater than or equal to 1, generate a complete graph
if probability >= 1:
return complete_graph(_UpperCamelCase )
# if the probability is lower than or equal to 0, return a graph without edges
if probability <= 0:
return graph
# for each pair of nodes, add an edge from i to j
# if the randomly generated number is lower than the probability
for i in range(_UpperCamelCase ):
for j in range(i + 1 , _UpperCamelCase ):
if random.random() < probability:
graph[i].append(_UpperCamelCase )
if not directed:
# if the graph is undirected, also add an edge from j to i
graph[j].append(_UpperCamelCase )
return graph
def __lowerCAmelCase ( _UpperCamelCase : int ) -> dict:
'''simple docstring'''
return {
i: [j for j in range(_UpperCamelCase ) if i != j] for i in range(_UpperCamelCase )
}
if __name__ == "__main__":
import doctest
doctest.testmod()
| 673 | 1 |
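# Quick sanity sketch for the Erdos-Renyi-style generator above: an edge (i, j) is
# kept when the random draw falls below the probability, and undirected graphs get
# the mirror edge too. Redefined here with plain names so the sketch is runnable.
import random

def random_undirected_graph(n, p):
    graph = {i: [] for i in range(n)}
    for i in range(n):
        for j in range(i + 1, n):
            if random.random() < p:
                graph[i].append(j)
                graph[j].append(i)
    return graph

random.seed(0)
g = random_undirected_graph(5, 0.5)
assert all(j in g[i] for j in g for i in g[j])  # adjacency is symmetric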
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class UpperCamelCase :
def __init__( self : int , snake_case__ : List[Any] , snake_case__ : str=1_3 , snake_case__ : Optional[Any]=7 , snake_case__ : str=True , snake_case__ : Optional[int]=True , snake_case__ : Tuple=True , snake_case__ : Tuple=True , snake_case__ : Tuple=9_9 , snake_case__ : List[str]=2_4 , snake_case__ : Union[str, Any]=2 , snake_case__ : Union[str, Any]=6 , snake_case__ : List[str]=3_7 , snake_case__ : List[str]="gelu" , snake_case__ : List[str]=0.1 , snake_case__ : str=0.1 , snake_case__ : Any=5_1_2 , snake_case__ : Tuple=1_6 , snake_case__ : Any=2 , snake_case__ : List[Any]=0.02 , snake_case__ : Dict=3 , snake_case__ : Dict=None , snake_case__ : Optional[int]=1_0_0_0 , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = parent
SCREAMING_SNAKE_CASE = batch_size
SCREAMING_SNAKE_CASE = seq_length
SCREAMING_SNAKE_CASE = is_training
SCREAMING_SNAKE_CASE = use_input_mask
SCREAMING_SNAKE_CASE = use_token_type_ids
SCREAMING_SNAKE_CASE = use_labels
SCREAMING_SNAKE_CASE = vocab_size
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = intermediate_size
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = hidden_dropout_prob
SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE = max_position_embeddings
SCREAMING_SNAKE_CASE = type_vocab_size
SCREAMING_SNAKE_CASE = type_sequence_label_size
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = num_labels
SCREAMING_SNAKE_CASE = scope
SCREAMING_SNAKE_CASE = range_bbox
def UpperCamelCase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
SCREAMING_SNAKE_CASE = bbox[i, j, 3]
SCREAMING_SNAKE_CASE = bbox[i, j, 1]
SCREAMING_SNAKE_CASE = t
if bbox[i, j, 2] < bbox[i, j, 0]:
SCREAMING_SNAKE_CASE = bbox[i, j, 2]
SCREAMING_SNAKE_CASE = bbox[i, j, 0]
SCREAMING_SNAKE_CASE = t
SCREAMING_SNAKE_CASE = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
SCREAMING_SNAKE_CASE = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
if self.use_labels:
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE = self.get_config()
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
def UpperCamelCase ( self : Tuple ):
"""simple docstring"""
return LiltConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def UpperCamelCase ( self : int , snake_case__ : Tuple , snake_case__ : Optional[int] , snake_case__ : int , snake_case__ : Optional[int] , snake_case__ : Dict , snake_case__ : Any , snake_case__ : Union[str, Any] , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = LiltModel(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(snake_case__ , bbox=snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ )
SCREAMING_SNAKE_CASE = model(snake_case__ , bbox=snake_case__ , token_type_ids=snake_case__ )
SCREAMING_SNAKE_CASE = model(snake_case__ , bbox=snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def UpperCamelCase ( self : int , snake_case__ : Optional[int] , snake_case__ : Optional[Any] , snake_case__ : Dict , snake_case__ : Dict , snake_case__ : Tuple , snake_case__ : Union[str, Any] , snake_case__ : Dict , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.num_labels
SCREAMING_SNAKE_CASE = LiltForTokenClassification(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(
snake_case__ , bbox=snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase ( self : Dict , snake_case__ : str , snake_case__ : List[Any] , snake_case__ : Dict , snake_case__ : Dict , snake_case__ : Dict , snake_case__ : Tuple , snake_case__ : Union[str, Any] , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = LiltForQuestionAnswering(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(
snake_case__ , bbox=snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = config_and_inputs
SCREAMING_SNAKE_CASE = {
'input_ids': input_ids,
'bbox': bbox,
'token_type_ids': token_type_ids,
'attention_mask': input_mask,
}
return config, inputs_dict
@require_torch
class UpperCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ):
__UpperCamelCase =(
(
LiltModel,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltForQuestionAnswering,
)
if is_torch_available()
else ()
)
__UpperCamelCase =(
{
"feature-extraction": LiltModel,
"question-answering": LiltForQuestionAnswering,
"text-classification": LiltForSequenceClassification,
"token-classification": LiltForTokenClassification,
"zero-shot": LiltForSequenceClassification,
}
if is_torch_available()
else {}
)
__UpperCamelCase =False
__UpperCamelCase =False
def UpperCamelCase ( self : int , snake_case__ : int , snake_case__ : Optional[int] , snake_case__ : Optional[Any] , snake_case__ : Any , snake_case__ : Union[str, Any] ):
"""simple docstring"""
return True
def UpperCamelCase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = LiltModelTester(self )
SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=snake_case__ , hidden_size=3_7 )
def UpperCamelCase ( self : str ):
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCamelCase ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case__ )
def UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
SCREAMING_SNAKE_CASE = type
self.model_tester.create_and_check_model(*snake_case__ )
def UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*snake_case__ )
def UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*snake_case__ )
@slow
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE = LiltModel.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
@require_torch
@slow
class UpperCamelCase ( unittest.TestCase ):
def UpperCamelCase ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = LiltModel.from_pretrained('SCUT-DLVCLab/lilt-roberta-en-base' ).to(snake_case__ )
SCREAMING_SNAKE_CASE = torch.tensor([[1, 2]] , device=snake_case__ )
SCREAMING_SNAKE_CASE = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=snake_case__ )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(input_ids=snake_case__ , bbox=snake_case__ )
SCREAMING_SNAKE_CASE = torch.Size([1, 2, 7_6_8] )
SCREAMING_SNAKE_CASE = torch.tensor(
[[-0.0_653, 0.0_950, -0.0_061], [-0.0_545, 0.0_926, -0.0_324]] , device=snake_case__ , )
self.assertTrue(outputs.last_hidden_state.shape , snake_case__ )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , snake_case__ , atol=1E-3 ) )
| 673 |
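# The LiLT tester above legalises random boxes with an element-wise swap so that
# x0 <= x1 and y0 <= y1; the same fix can be done vectorised. A sketch (the
# function name is illustrative):
import torch

def make_boxes_legal(bbox: torch.Tensor) -> torch.Tensor:
    # bbox: (..., 4) laid out as (x0, y0, x1, y1); sort each coordinate pair
    x = torch.stack([bbox[..., 0], bbox[..., 2]], dim=-1).sort(dim=-1).values
    y = torch.stack([bbox[..., 1], bbox[..., 3]], dim=-1).sort(dim=-1).values
    return torch.stack([x[..., 0], y[..., 0], x[..., 1], y[..., 1]], dim=-1)

assert make_boxes_legal(torch.tensor([[9, 2, 3, 1]])).tolist() == [[3, 1, 9, 2]]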
import unittest
from transformers import AutoTokenizer, NystromformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
NystromformerModel,
)
from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
class UpperCamelCase :
def __init__( self : List[str] , snake_case__ : Dict , snake_case__ : Optional[Any]=1_3 , snake_case__ : Union[str, Any]=7 , snake_case__ : List[str]=True , snake_case__ : Any=True , snake_case__ : List[str]=True , snake_case__ : Optional[Any]=True , snake_case__ : List[str]=9_9 , snake_case__ : str=3_2 , snake_case__ : Dict=5 , snake_case__ : str=4 , snake_case__ : int=3_7 , snake_case__ : Union[str, Any]="gelu" , snake_case__ : Any=0.1 , snake_case__ : Dict=0.1 , snake_case__ : Optional[Any]=5_1_2 , snake_case__ : List[Any]=1_6 , snake_case__ : str=2 , snake_case__ : int=0.02 , snake_case__ : List[str]=3 , snake_case__ : Dict=4 , snake_case__ : str=None , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = parent
SCREAMING_SNAKE_CASE = batch_size
SCREAMING_SNAKE_CASE = seq_length
SCREAMING_SNAKE_CASE = is_training
SCREAMING_SNAKE_CASE = use_input_mask
SCREAMING_SNAKE_CASE = use_token_type_ids
SCREAMING_SNAKE_CASE = use_labels
SCREAMING_SNAKE_CASE = vocab_size
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = intermediate_size
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = hidden_dropout_prob
SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE = max_position_embeddings
SCREAMING_SNAKE_CASE = type_vocab_size
SCREAMING_SNAKE_CASE = type_sequence_label_size
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = num_labels
SCREAMING_SNAKE_CASE = num_choices
SCREAMING_SNAKE_CASE = scope
def UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
if self.use_labels:
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_choices )
SCREAMING_SNAKE_CASE = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
return NystromformerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=snake_case__ , initializer_range=self.initializer_range , )
def UpperCamelCase ( self : List[str] , snake_case__ : Any , snake_case__ : Tuple , snake_case__ : Union[str, Any] , snake_case__ : Dict , snake_case__ : List[str] , snake_case__ : int , snake_case__ : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = NystromformerModel(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ )
SCREAMING_SNAKE_CASE = model(snake_case__ , token_type_ids=snake_case__ )
SCREAMING_SNAKE_CASE = model(snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase ( self : List[Any] , snake_case__ : str , snake_case__ : Dict , snake_case__ : Dict , snake_case__ : Dict , snake_case__ : int , snake_case__ : int , snake_case__ : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = NystromformerForMaskedLM(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase ( self : Union[str, Any] , snake_case__ : Tuple , snake_case__ : Optional[Any] , snake_case__ : int , snake_case__ : Optional[int] , snake_case__ : str , snake_case__ : List[str] , snake_case__ : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = NystromformerForQuestionAnswering(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(
snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCamelCase ( self : List[str] , snake_case__ : int , snake_case__ : int , snake_case__ : int , snake_case__ : str , snake_case__ : Optional[int] , snake_case__ : Any , snake_case__ : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.num_labels
SCREAMING_SNAKE_CASE = NystromformerForSequenceClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase ( self : Tuple , snake_case__ : int , snake_case__ : Tuple , snake_case__ : List[str] , snake_case__ : List[Any] , snake_case__ : Optional[Any] , snake_case__ : List[Any] , snake_case__ : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.num_labels
SCREAMING_SNAKE_CASE = NystromformerForTokenClassification(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase ( self : int , snake_case__ : Optional[Any] , snake_case__ : List[str] , snake_case__ : Union[str, Any] , snake_case__ : Tuple , snake_case__ : Optional[int] , snake_case__ : Any , snake_case__ : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.num_choices
SCREAMING_SNAKE_CASE = NystromformerForMultipleChoice(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE = model(
snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = config_and_inputs
SCREAMING_SNAKE_CASE = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class UpperCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ):
__UpperCamelCase =(
(
NystromformerModel,
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
)
if is_torch_available()
else ()
)
__UpperCamelCase =(
{
"feature-extraction": NystromformerModel,
"fill-mask": NystromformerForMaskedLM,
"question-answering": NystromformerForQuestionAnswering,
"text-classification": NystromformerForSequenceClassification,
"token-classification": NystromformerForTokenClassification,
"zero-shot": NystromformerForSequenceClassification,
}
if is_torch_available()
else {}
)
__UpperCamelCase =False
__UpperCamelCase =False
def UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = NystromformerModelTester(self )
SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=snake_case__ , hidden_size=3_7 )
def UpperCamelCase ( self : List[str] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case__ )
def UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
SCREAMING_SNAKE_CASE = type
self.model_tester.create_and_check_model(*snake_case__ )
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*snake_case__ )
def UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*snake_case__ )
def UpperCamelCase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*snake_case__ )
def UpperCamelCase ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*snake_case__ )
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*snake_case__ )
@slow
def UpperCamelCase ( self : List[str] ):
"""simple docstring"""
for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE = NystromformerModel.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
@require_torch
class UpperCamelCase ( unittest.TestCase ):
@slow
def UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = NystromformerModel.from_pretrained('uw-madison/nystromformer-512' )
SCREAMING_SNAKE_CASE = torch.tensor([[0, 1, 2, 3, 4, 5]] )
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(snake_case__ )[0]
SCREAMING_SNAKE_CASE = torch.Size((1, 6, 7_6_8) )
self.assertEqual(output.shape , snake_case__ )
SCREAMING_SNAKE_CASE = torch.tensor(
[[[-0.4_532, -0.0_936, 0.5_137], [-0.2_676, 0.0_628, 0.6_186], [-0.3_629, -0.1_726, 0.4_716]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , snake_case__ , atol=1E-4 ) )
@slow
def UpperCamelCase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = 'the [MASK] of Belgium is Brussels'
SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained('uw-madison/nystromformer-512' )
SCREAMING_SNAKE_CASE = NystromformerForMaskedLM.from_pretrained('uw-madison/nystromformer-512' )
SCREAMING_SNAKE_CASE = tokenizer(snake_case__ , return_tensors='pt' )
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(encoding.input_ids ).logits
SCREAMING_SNAKE_CASE = token_logits[:, 2, :].argmax(-1 )[0]
self.assertEqual(tokenizer.decode(snake_case__ ) , 'capital' )
| 673 | 1 |
def __lowerCAmelCase ( _UpperCamelCase : int ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE = abs(_UpperCamelCase )
SCREAMING_SNAKE_CASE = 0
while n > 0:
res += n % 10
n //= 10
return res
def __lowerCAmelCase ( _UpperCamelCase : int ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE = abs(_UpperCamelCase )
return n if n < 10 else n % 10 + sum_of_digits(n // 10 )
def __lowerCAmelCase ( _UpperCamelCase : int ) -> int:
'''simple docstring'''
return sum(int(_UpperCamelCase ) for c in str(abs(_UpperCamelCase ) ) )
def __lowerCAmelCase ( ) -> None:
'''simple docstring'''
from collections.abc import Callable
from timeit import timeit
def benchmark_a_function(_UpperCamelCase : Callable , _UpperCamelCase : int ) -> None:
SCREAMING_SNAKE_CASE = f"""{func.__name__}({value})"""
SCREAMING_SNAKE_CASE = timeit(f"""__main__.{call}""" , setup='import __main__' )
print(f"""{call:56} = {func(_UpperCamelCase )} -- {timing:.4f} seconds""" )
for value in (26_21_44, 11_25_89_99_06_84_26_24, 1_26_76_50_60_02_28_22_94_01_49_67_03_20_53_76):
for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
benchmark_a_function(_UpperCamelCase , _UpperCamelCase )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 673 |
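# Quick equivalence check for the three digit-sum variants above (iterative,
# recursive, string-based), redefined with plain names so the sketch is
# self-contained:
def sum_digits_iter(n):
    n, total = abs(n), 0
    while n:
        total, n = total + n % 10, n // 10
    return total

def sum_digits_rec(n):
    n = abs(n)
    return n if n < 10 else n % 10 + sum_digits_rec(n // 10)

def sum_digits_str(n):
    return sum(int(c) for c in str(abs(n)))

assert all(sum_digits_iter(k) == sum_digits_rec(k) == sum_digits_str(k)
           for k in (-1234, 0, 9, 262144, 112589990684262624))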
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
ImageTextPipelineOutput,
UniDiffuserPipeline,
)
else:
from .modeling_text_decoder import UniDiffuserTextDecoder
from .modeling_uvit import UniDiffuserModel, UTransformeraDModel
from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
| 673 | 1 |
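# The guarded import above is the standard optional-dependency pattern: try the
# real objects and fall back to placeholders that raise a helpful error when used.
# A generic sketch of the same idea (the names and the torch dependency are
# illustrative, not the diffusers implementation):
class _MissingDependency:
    def __init__(self, name, requirement):
        self._name, self._requirement = name, requirement

    def __call__(self, *args, **kwargs):
        raise ImportError(f"{self._name} requires `{self._requirement}` to be installed.")

try:
    import torch  # noqa: F401
except ImportError:
    UniDiffuserPipeline = _MissingDependency("UniDiffuserPipeline", "torch")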
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
if is_sentencepiece_available():
from ..ta.tokenization_ta import TaTokenizer
else:
from ...utils.dummy_sentencepiece_objects import TaTokenizer
a_ : Dict = TaTokenizer
if is_tokenizers_available():
from ..ta.tokenization_ta_fast import TaTokenizerFast
else:
from ...utils.dummy_tokenizers_objects import TaTokenizerFast
a_ : List[str] = TaTokenizerFast
a_ : List[Any] = {"configuration_mt5": ["MT5Config", "MT5OnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : Tuple = [
"MT5EncoderModel",
"MT5ForConditionalGeneration",
"MT5ForQuestionAnswering",
"MT5Model",
"MT5PreTrainedModel",
"MT5Stack",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : int = ["TFMT5EncoderModel", "TFMT5ForConditionalGeneration", "TFMT5Model"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : int = ["FlaxMT5EncoderModel", "FlaxMT5ForConditionalGeneration", "FlaxMT5Model"]
if TYPE_CHECKING:
from .configuration_mta import MTaConfig, MTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mta import (
MTaEncoderModel,
MTaForConditionalGeneration,
MTaForQuestionAnswering,
MTaModel,
MTaPreTrainedModel,
MTaStack,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel
else:
import sys
a_ : List[Any] = _LazyModule(
__name__,
globals()["__file__"],
_import_structure,
extra_objects={"MT5Tokenizer": MTaTokenizer, "MT5TokenizerFast": MTaTokenizerFast},
module_spec=__spec__,
)
| 673 |
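# The _LazyModule above defers heavy submodule imports until first attribute
# access. When placed in a module, PEP 562's module-level __getattr__ gives a
# minimal version of the same idea (a sketch, not the transformers mechanism):
import importlib

_LAZY_ATTRS = {"sqrt": ("math", "sqrt"), "dataclass": ("dataclasses", "dataclass")}

def __getattr__(name):
    if name in _LAZY_ATTRS:
        module_name, attr = _LAZY_ATTRS[name]
        return getattr(importlib.import_module(module_name), attr)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")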
import heapq
import sys
import numpy as np
a_ : Optional[int] = tuple[int, int]
class UpperCamelCase :
def __init__( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = set()
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
if not self.empty():
return self.elements[0][0]
else:
return float('inf' )
def UpperCamelCase ( self : List[str] ):
"""simple docstring"""
return len(self.elements ) == 0
def UpperCamelCase ( self : Union[str, Any] , snake_case__ : Optional[Any] , snake_case__ : List[Any] ):
"""simple docstring"""
if item not in self.set:
heapq.heappush(self.elements , (priority, item) )
self.set.add(snake_case__ )
else:
# update
# print("update", item)
SCREAMING_SNAKE_CASE = []
((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) = heapq.heappop(self.elements )
while x != item:
temp.append((pri, x) )
((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) = heapq.heappop(self.elements )
temp.append((priority, item) )
for pro, xxx in temp:
heapq.heappush(self.elements , (pro, xxx) )
def UpperCamelCase ( self : Dict , snake_case__ : Dict ):
"""simple docstring"""
if item in self.set:
self.set.remove(snake_case__ )
SCREAMING_SNAKE_CASE = []
((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) = heapq.heappop(self.elements )
while x != item:
temp.append((pro, x) )
((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) = heapq.heappop(self.elements )
for prito, yyy in temp:
heapq.heappush(self.elements , (prito, yyy) )
def UpperCamelCase ( self : str ):
"""simple docstring"""
return self.elements[0][1]
def UpperCamelCase ( self : Tuple ):
"""simple docstring"""
((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) = heapq.heappop(self.elements )
self.set.remove(snake_case__ )
return (priority, item)
def __lowerCAmelCase ( _UpperCamelCase : TPos , _UpperCamelCase : TPos ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = np.array(_UpperCamelCase )
SCREAMING_SNAKE_CASE = np.array(_UpperCamelCase )
return np.linalg.norm(a - b )
def __lowerCAmelCase ( _UpperCamelCase : TPos , _UpperCamelCase : TPos ) -> Dict:
'''simple docstring'''
return consistent_heuristic(_UpperCamelCase , _UpperCamelCase ) // t
def __lowerCAmelCase ( _UpperCamelCase : TPos , _UpperCamelCase : TPos ) -> Optional[int]:
'''simple docstring'''
return abs(p[0] - goal[0] ) + abs(p[1] - goal[1] )
def __lowerCAmelCase ( _UpperCamelCase : TPos , _UpperCamelCase : int , _UpperCamelCase : TPos , _UpperCamelCase : dict[TPos, float] ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = g_function[start] + Wa * heuristics[i](_UpperCamelCase , _UpperCamelCase )
return ans
def __lowerCAmelCase ( _UpperCamelCase : List[str] , _UpperCamelCase : int , _UpperCamelCase : Tuple ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE = np.chararray((n, n) )
for i in range(_UpperCamelCase ):
for j in range(_UpperCamelCase ):
SCREAMING_SNAKE_CASE = '*'
for i in range(_UpperCamelCase ):
for j in range(_UpperCamelCase ):
if (j, (n - 1) - i) in blocks:
SCREAMING_SNAKE_CASE = '#'
SCREAMING_SNAKE_CASE = '-'
SCREAMING_SNAKE_CASE = back_pointer[goal]
while x != start:
((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) = x
# print(x)
SCREAMING_SNAKE_CASE = '-'
SCREAMING_SNAKE_CASE = back_pointer[x]
SCREAMING_SNAKE_CASE = '-'
for i in range(_UpperCamelCase ):
for j in range(_UpperCamelCase ):
if (i, j) == (0, n - 1):
print(grid[i][j] , end=' ' )
print('<-- End position' , end=' ' )
else:
print(grid[i][j] , end=' ' )
print()
print('^' )
print('Start position' )
print()
print('# is an obstacle' )
print('- is the path taken by algorithm' )
print('PATH TAKEN BY THE ALGORITHM IS:-' )
SCREAMING_SNAKE_CASE = back_pointer[goal]
while x != start:
print(_UpperCamelCase , end=' ' )
SCREAMING_SNAKE_CASE = back_pointer[x]
print(_UpperCamelCase )
sys.exit()
def __lowerCAmelCase ( _UpperCamelCase : TPos ) -> Any:
'''simple docstring'''
if p[0] < 0 or p[0] > n - 1:
return False
if p[1] < 0 or p[1] > n - 1:
return False
return True
def __lowerCAmelCase ( _UpperCamelCase : List[Any] , _UpperCamelCase : Tuple , _UpperCamelCase : Any , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Tuple , _UpperCamelCase : List[Any] , _UpperCamelCase : Optional[Any] , ) -> List[Any]:
'''simple docstring'''
for itera in range(_UpperCamelCase ):
open_list[itera].remove_element(_UpperCamelCase )
# print("s", s)
# print("j", j)
((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) = s
SCREAMING_SNAKE_CASE = (x - 1, y)
SCREAMING_SNAKE_CASE = (x + 1, y)
SCREAMING_SNAKE_CASE = (x, y + 1)
SCREAMING_SNAKE_CASE = (x, y - 1)
for neighbours in [left, right, up, down]:
if neighbours not in blocks:
if valid(_UpperCamelCase ) and neighbours not in visited:
# print("neighbour", neighbours)
visited.add(_UpperCamelCase )
SCREAMING_SNAKE_CASE = -1
SCREAMING_SNAKE_CASE = float('inf' )
if valid(_UpperCamelCase ) and g_function[neighbours] > g_function[s] + 1:
SCREAMING_SNAKE_CASE = g_function[s] + 1
SCREAMING_SNAKE_CASE = s
if neighbours not in close_list_anchor:
open_list[0].put(_UpperCamelCase , key(_UpperCamelCase , 0 , _UpperCamelCase , _UpperCamelCase ) )
if neighbours not in close_list_inad:
for var in range(1 , _UpperCamelCase ):
if key(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) <= Wa * key(
_UpperCamelCase , 0 , _UpperCamelCase , _UpperCamelCase ):
open_list[j].put(
_UpperCamelCase , key(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) )
def __lowerCAmelCase ( ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE = []
for x in range(1 , 5 ):
for y in range(1 , 6 ):
some_list.append((x, y) )
for x in range(15 , 20 ):
some_list.append((x, 17) )
for x in range(10 , 19 ):
for y in range(1 , 15 ):
some_list.append((x, y) )
# L block
for x in range(1 , 4 ):
for y in range(12 , 19 ):
some_list.append((x, y) )
for x in range(3 , 13 ):
for y in range(16 , 19 ):
some_list.append((x, y) )
return some_list
a_ : str = {0: consistent_heuristic, 1: heuristic_a, 2: heuristic_a}
a_ : List[str] = [
(0, 1),
(1, 1),
(2, 1),
(3, 1),
(4, 1),
(5, 1),
(6, 1),
(7, 1),
(8, 1),
(9, 1),
(10, 1),
(11, 1),
(12, 1),
(13, 1),
(14, 1),
(15, 1),
(16, 1),
(17, 1),
(18, 1),
(19, 1),
]
a_ : Union[str, Any] = make_common_ground()
a_ : Tuple = blocks_blk
# hyper parameters
a_ : Any = 1
a_ : List[str] = 1
a_ : Union[str, Any] = 20
a_ : Optional[Any] = 3 # one consistent and two other inconsistent
# start and end destination
a_ : int = (0, 0)
a_ : Optional[int] = (n - 1, n - 1)
a_ : Union[str, Any] = 1
def __lowerCAmelCase ( _UpperCamelCase : TPos , _UpperCamelCase : TPos , _UpperCamelCase : int ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = {start: 0, goal: float('inf' )}
SCREAMING_SNAKE_CASE = {start: -1, goal: -1}
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = set()
for i in range(_UpperCamelCase ):
open_list.append(PriorityQueue() )
open_list[i].put(_UpperCamelCase , key(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) )
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = []
while open_list[0].minkey() < float('inf' ):
for i in range(1 , _UpperCamelCase ):
# print(open_list[0].minkey(), open_list[i].minkey())
if open_list[i].minkey() <= Wa * open_list[0].minkey():
global t
t += 1
if g_function[goal] <= open_list[i].minkey():
if g_function[goal] < float('inf' ):
do_something(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
else:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = open_list[i].top_show()
visited.add(_UpperCamelCase )
expand_state(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , )
close_list_inad.append(_UpperCamelCase )
else:
if g_function[goal] <= open_list[0].minkey():
if g_function[goal] < float('inf' ):
do_something(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
else:
SCREAMING_SNAKE_CASE = open_list[0].top_show()
visited.add(_UpperCamelCase )
expand_state(
_UpperCamelCase , 0 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , )
close_list_anchor.append(_UpperCamelCase )
print('No path found to goal' )
print()
for i in range(n - 1 , -1 , -1 ):
for j in range(_UpperCamelCase ):
if (j, i) in blocks:
print('#' , end=' ' )
elif (j, i) in back_pointer:
if (j, i) == (n - 1, n - 1):
print('*' , end=' ' )
else:
print('-' , end=' ' )
else:
print('*' , end=' ' )
if (j, i) == (n - 1, n - 1):
print('<-- End position' , end=' ' )
print()
print('^' )
print('Start position' )
print()
print('# is an obstacle' )
print('- is the path taken by algorithm' )
if __name__ == "__main__":
multi_a_star(start, goal, n_heuristic)
| 673 | 1 |
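# The PriorityQueue above handles updates and removals by popping and rebuilding
# the heap, which is O(n) per operation. A common alternative is heapq with "lazy
# deletion": push a fresh entry on update and skip stale entries on pop. A minimal
# sketch (not the snippet's implementation):
import heapq
import itertools

class LazyPriorityQueue:
    def __init__(self):
        self._heap, self._entries, self._counter = [], {}, itertools.count()

    def put(self, item, priority):
        if item in self._entries:      # mark the superseded entry as stale
            self._entries[item][-1] = None
        entry = [priority, next(self._counter), item]
        self._entries[item] = entry
        heapq.heappush(self._heap, entry)

    def pop(self):
        while self._heap:              # discard stale entries lazily
            priority, _, item = heapq.heappop(self._heap)
            if item is not None:
                del self._entries[item]
                return priority, item
        raise KeyError("pop from an empty priority queue")

q = LazyPriorityQueue()
q.put("a", 5); q.put("b", 3); q.put("a", 1)
assert q.pop() == (1, "a") and q.pop() == (3, "b")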
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class UpperCamelCase ( unittest.TestCase ):
def __init__( self : Dict , snake_case__ : List[str] , snake_case__ : bool = True , snake_case__ : Dict[str, int] = None , snake_case__ : int = 3_2 , snake_case__ : bool = True , snake_case__ : Union[int, float] = 1 / 2_5_5 , snake_case__ : bool = True , snake_case__ : bool = True , snake_case__ : Optional[Union[float, List[float]]] = [0.48_145_466, 0.4_578_275, 0.40_821_073] , snake_case__ : Optional[Union[float, List[float]]] = [0.26_862_954, 0.26_130_258, 0.27_577_711] , snake_case__ : bool = True , snake_case__ : List[Any]=7 , snake_case__ : Optional[int]=3_0 , snake_case__ : Tuple=4_0_0 , snake_case__ : Any=3 , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = parent
SCREAMING_SNAKE_CASE = do_resize
SCREAMING_SNAKE_CASE = size if size is not None else {'shortest_edge': 2_8_8}
SCREAMING_SNAKE_CASE = size_divisor
SCREAMING_SNAKE_CASE = do_rescale
SCREAMING_SNAKE_CASE = rescale_factor
SCREAMING_SNAKE_CASE = do_normalize
SCREAMING_SNAKE_CASE = do_center_crop
SCREAMING_SNAKE_CASE = image_mean
SCREAMING_SNAKE_CASE = image_std
SCREAMING_SNAKE_CASE = do_pad
SCREAMING_SNAKE_CASE = batch_size
SCREAMING_SNAKE_CASE = num_channels
SCREAMING_SNAKE_CASE = min_resolution
SCREAMING_SNAKE_CASE = max_resolution
def UpperCamelCase ( self : Any ):
"""simple docstring"""
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"size_divisor": self.size_divisor,
}
def UpperCamelCase ( self : Dict , snake_case__ : Union[str, Any] , snake_case__ : List[str]=False ):
"""simple docstring"""
if not batched:
SCREAMING_SNAKE_CASE = self.size['shortest_edge']
SCREAMING_SNAKE_CASE = image_inputs[0]
if isinstance(snake_case__ , Image.Image ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = image.size
else:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = image.shape[1], image.shape[2]
SCREAMING_SNAKE_CASE = size / min(snake_case__ , snake_case__ )
if h < w:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = size, scale * w
else:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = scale * h, size
SCREAMING_SNAKE_CASE = int((1_3_3_3 / 8_0_0) * size )
if max(snake_case__ , snake_case__ ) > max_size:
SCREAMING_SNAKE_CASE = max_size / max(snake_case__ , snake_case__ )
SCREAMING_SNAKE_CASE = newh * scale
SCREAMING_SNAKE_CASE = neww * scale
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = int(newh + 0.5 ), int(neww + 0.5 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = (
newh // self.size_divisor * self.size_divisor,
neww // self.size_divisor * self.size_divisor,
)
else:
SCREAMING_SNAKE_CASE = []
for image in image_inputs:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
SCREAMING_SNAKE_CASE = max(snake_case__ , key=lambda snake_case__ : item[0] )[0]
SCREAMING_SNAKE_CASE = max(snake_case__ , key=lambda snake_case__ : item[1] )[1]
return expected_height, expected_width
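    # Worked example of the resize rule above (a sketch with shortest_edge=288,
    # size_divisor=32): a 600x400 image gives scale = 288/400 = 0.72, so
    # (h, w) -> (288, 432); the cap int(1333/800 * 288) = 479 is not exceeded,
    # and flooring to the divisor yields (288 // 32 * 32, 432 // 32 * 32)
    # = (288, 416).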
@require_torch
@require_vision
class UpperCamelCase ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
__UpperCamelCase =BridgeTowerImageProcessor if is_vision_available() else None
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = BridgeTowerImageProcessingTester(self )
@property
def UpperCamelCase ( self : str ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(snake_case__ , 'image_mean' ) )
self.assertTrue(hasattr(snake_case__ , 'image_std' ) )
self.assertTrue(hasattr(snake_case__ , 'do_normalize' ) )
self.assertTrue(hasattr(snake_case__ , 'do_resize' ) )
self.assertTrue(hasattr(snake_case__ , 'size' ) )
self.assertTrue(hasattr(snake_case__ , 'size_divisor' ) )
def UpperCamelCase ( self : Tuple ):
"""simple docstring"""
pass
def UpperCamelCase ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ , Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.image_processor_tester.get_expected_values(snake_case__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE = image_processing(snake_case__ , return_tensors='pt' ).pixel_values
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.image_processor_tester.get_expected_values(snake_case__ , batched=snake_case__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ , numpify=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ , np.ndarray )
# Test not batched input
SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.image_processor_tester.get_expected_values(snake_case__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE = image_processing(snake_case__ , return_tensors='pt' ).pixel_values
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.image_processor_tester.get_expected_values(snake_case__ , batched=snake_case__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ , torchify=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ , torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.image_processor_tester.get_expected_values(snake_case__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE = image_processing(snake_case__ , return_tensors='pt' ).pixel_values
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.image_processor_tester.get_expected_values(snake_case__ , batched=snake_case__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
| 673 |
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
a_ : Tuple = logging.get_logger(__name__) # pylint: disable=invalid-name
a_ : str = "\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline\n >>> from diffusers.utils import load_image\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16\n ... )\n >>> pipe_prior.to(\"cuda\")\n\n >>> prompt = \"A red cartoon frog, 4k\"\n >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)\n\n >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(\n ... \"kandinsky-community/kandinsky-2-2-decoder\", torch_dtype=torch.float16\n ... )\n >>> pipe.to(\"cuda\")\n\n >>> init_image = load_image(\n ... \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"\n ... \"/kandinsky/frog.png\"\n ... )\n\n >>> image = pipe(\n ... image=init_image,\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... strength=0.2,\n ... ).images\n\n >>> image[0].save(\"red_frog.png\")\n ```\n"
def __lowerCAmelCase ( _UpperCamelCase : List[str] , _UpperCamelCase : Any , _UpperCamelCase : Any=8 ) -> List[str]:
    '''Compute the latent height/width for a pixel size: ceil-divide by scale_factor**2, then multiply by scale_factor.'''
SCREAMING_SNAKE_CASE = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
SCREAMING_SNAKE_CASE = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
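# Worked check (a sketch assuming the default scale_factor=8): height=768 gives
# 768 // 64 = 12 exactly, so the function returns 12 * 8 = 96 per side, while a
# non-multiple such as 770 rounds up to 13 -> 104.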
def __lowerCAmelCase ( _UpperCamelCase : Optional[int] , _UpperCamelCase : List[Any]=5_12 , _UpperCamelCase : Union[str, Any]=5_12 ) -> Union[str, Any]:
    '''Resize a PIL image to (w, h) and convert it to a float tensor in [-1, 1] with shape (1, 3, h, w).'''
SCREAMING_SNAKE_CASE = pil_image.resize((w, h) , resample=Image.BICUBIC , reducing_gap=1 )
SCREAMING_SNAKE_CASE = np.array(pil_image.convert('RGB' ) )
SCREAMING_SNAKE_CASE = arr.astype(np.floataa ) / 1_27.5 - 1
SCREAMING_SNAKE_CASE = np.transpose(_UpperCamelCase , [2, 0, 1] )
SCREAMING_SNAKE_CASE = torch.from_numpy(_UpperCamelCase ).unsqueeze(0 )
return image
class UpperCamelCase ( SCREAMING_SNAKE_CASE ):
def __init__( self : int , snake_case__ : UNetaDConditionModel , snake_case__ : DDPMScheduler , snake_case__ : VQModel , ):
"""simple docstring"""
super().__init__()
self.register_modules(
unet=snake_case__ , scheduler=snake_case__ , movq=snake_case__ , )
SCREAMING_SNAKE_CASE = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def UpperCamelCase ( self : Any , snake_case__ : int , snake_case__ : List[Any] , snake_case__ : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = min(int(num_inference_steps * strength ) , snake_case__ )
SCREAMING_SNAKE_CASE = max(num_inference_steps - init_timestep , 0 )
SCREAMING_SNAKE_CASE = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
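    # Sketch of the strength/timestep trade-off: with num_inference_steps=100 and
    # strength=0.3, init_timestep = min(30, 100) = 30 and t_start = 70, so only
    # the final 30 scheduler timesteps run -- lower strength stays closer to the
    # input image.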
def UpperCamelCase ( self : List[str] , snake_case__ : Dict , snake_case__ : int , snake_case__ : Optional[int] , snake_case__ : Optional[Any] , snake_case__ : Union[str, Any] , snake_case__ : Optional[int] , snake_case__ : str=None ):
"""simple docstring"""
if not isinstance(snake_case__ , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
F"""`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(snake_case__ )}""" )
SCREAMING_SNAKE_CASE = image.to(device=snake_case__ , dtype=snake_case__ )
SCREAMING_SNAKE_CASE = batch_size * num_images_per_prompt
if image.shape[1] == 4:
SCREAMING_SNAKE_CASE = image
else:
if isinstance(snake_case__ , snake_case__ ) and len(snake_case__ ) != batch_size:
raise ValueError(
F"""You have passed a list of generators of length {len(snake_case__ )}, but requested an effective batch"""
F""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
elif isinstance(snake_case__ , snake_case__ ):
SCREAMING_SNAKE_CASE = [
self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(snake_case__ )
]
SCREAMING_SNAKE_CASE = torch.cat(snake_case__ , dim=0 )
else:
SCREAMING_SNAKE_CASE = self.movq.encode(snake_case__ ).latent_dist.sample(snake_case__ )
SCREAMING_SNAKE_CASE = self.movq.config.scaling_factor * init_latents
SCREAMING_SNAKE_CASE = torch.cat([init_latents] , dim=0 )
SCREAMING_SNAKE_CASE = init_latents.shape
SCREAMING_SNAKE_CASE = randn_tensor(snake_case__ , generator=snake_case__ , device=snake_case__ , dtype=snake_case__ )
# get latents
SCREAMING_SNAKE_CASE = self.scheduler.add_noise(snake_case__ , snake_case__ , snake_case__ )
SCREAMING_SNAKE_CASE = init_latents
return latents
def UpperCamelCase ( self : int , snake_case__ : List[str]=0 ):
"""simple docstring"""
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`' )
SCREAMING_SNAKE_CASE = torch.device(F"""cuda:{gpu_id}""" )
SCREAMING_SNAKE_CASE = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(snake_case__ , snake_case__ )
def UpperCamelCase ( self : Union[str, Any] , snake_case__ : Optional[int]=0 ):
"""simple docstring"""
if is_accelerate_available() and is_accelerate_version('>=' , '0.17.0.dev0' ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.' )
SCREAMING_SNAKE_CASE = torch.device(F"""cuda:{gpu_id}""" )
if self.device.type != "cpu":
self.to('cpu' , silence_dtype_warnings=snake_case__ )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
SCREAMING_SNAKE_CASE = None
for cpu_offloaded_model in [self.unet, self.movq]:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = cpu_offload_with_hook(snake_case__ , snake_case__ , prev_module_hook=snake_case__ )
# We'll offload the last model manually.
SCREAMING_SNAKE_CASE = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def UpperCamelCase ( self : Dict ):
"""simple docstring"""
if not hasattr(self.unet , '_hf_hook' ):
return self.device
for module in self.unet.modules():
if (
hasattr(snake_case__ , '_hf_hook' )
and hasattr(module._hf_hook , 'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(snake_case__ )
def __call__( self : str , snake_case__ : Union[torch.FloatTensor, List[torch.FloatTensor]] , snake_case__ : Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]] , snake_case__ : Union[torch.FloatTensor, List[torch.FloatTensor]] , snake_case__ : int = 5_1_2 , snake_case__ : int = 5_1_2 , snake_case__ : int = 1_0_0 , snake_case__ : float = 4.0 , snake_case__ : float = 0.3 , snake_case__ : int = 1 , snake_case__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , snake_case__ : Optional[str] = "pil" , snake_case__ : bool = True , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self._execution_device
SCREAMING_SNAKE_CASE = guidance_scale > 1.0
if isinstance(snake_case__ , snake_case__ ):
SCREAMING_SNAKE_CASE = torch.cat(snake_case__ , dim=0 )
SCREAMING_SNAKE_CASE = image_embeds.shape[0]
if isinstance(snake_case__ , snake_case__ ):
SCREAMING_SNAKE_CASE = torch.cat(snake_case__ , dim=0 )
if do_classifier_free_guidance:
SCREAMING_SNAKE_CASE = image_embeds.repeat_interleave(snake_case__ , dim=0 )
SCREAMING_SNAKE_CASE = negative_image_embeds.repeat_interleave(snake_case__ , dim=0 )
SCREAMING_SNAKE_CASE = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=snake_case__ )
if not isinstance(snake_case__ , snake_case__ ):
SCREAMING_SNAKE_CASE = [image]
if not all(isinstance(snake_case__ , (PIL.Image.Image, torch.Tensor) ) for i in image ):
raise ValueError(
F"""Input is in incorrect format: {[type(snake_case__ ) for i in image]}. Currently, we only support PIL image and pytorch tensor""" )
SCREAMING_SNAKE_CASE = torch.cat([prepare_image(snake_case__ , snake_case__ , snake_case__ ) for i in image] , dim=0 )
SCREAMING_SNAKE_CASE = image.to(dtype=image_embeds.dtype , device=snake_case__ )
SCREAMING_SNAKE_CASE = self.movq.encode(snake_case__ )['latents']
SCREAMING_SNAKE_CASE = latents.repeat_interleave(snake_case__ , dim=0 )
self.scheduler.set_timesteps(snake_case__ , device=snake_case__ )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.get_timesteps(snake_case__ , snake_case__ , snake_case__ )
SCREAMING_SNAKE_CASE = timesteps[:1].repeat(batch_size * num_images_per_prompt )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = downscale_height_and_width(snake_case__ , snake_case__ , self.movq_scale_factor )
SCREAMING_SNAKE_CASE = self.prepare_latents(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , image_embeds.dtype , snake_case__ , snake_case__ )
for i, t in enumerate(self.progress_bar(snake_case__ ) ):
# expand the latents if we are doing classifier free guidance
SCREAMING_SNAKE_CASE = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
SCREAMING_SNAKE_CASE = {'image_embeds': image_embeds}
SCREAMING_SNAKE_CASE = self.unet(
sample=snake_case__ , timestep=snake_case__ , encoder_hidden_states=snake_case__ , added_cond_kwargs=snake_case__ , return_dict=snake_case__ , )[0]
if do_classifier_free_guidance:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = noise_pred.split(latents.shape[1] , dim=1 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = noise_pred.chunk(2 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = variance_pred.chunk(2 )
SCREAMING_SNAKE_CASE = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
SCREAMING_SNAKE_CASE = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , 'variance_type' )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
SCREAMING_SNAKE_CASE = self.scheduler.step(
snake_case__ , snake_case__ , snake_case__ , generator=snake_case__ , )[0]
# post-processing
SCREAMING_SNAKE_CASE = self.movq.decode(snake_case__ , force_not_quantize=snake_case__ )['sample']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" )
if output_type in ["np", "pil"]:
SCREAMING_SNAKE_CASE = image * 0.5 + 0.5
SCREAMING_SNAKE_CASE = image.clamp(0 , 1 )
SCREAMING_SNAKE_CASE = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
SCREAMING_SNAKE_CASE = self.numpy_to_pil(snake_case__ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=snake_case__ )
| 673 | 1 |
def __lowerCAmelCase ( _UpperCamelCase : int ) -> list[int]:
    '''Return all prime numbers up to and including num using the sieve of Eratosthenes.'''
if num <= 0:
raise ValueError('Input must be a positive integer' )
SCREAMING_SNAKE_CASE = [True] * (num + 1)
SCREAMING_SNAKE_CASE = 2
while p * p <= num:
if primes[p]:
for i in range(p * p , num + 1 , _UpperCamelCase ):
SCREAMING_SNAKE_CASE = False
p += 1
return [prime for prime in range(2 , num + 1 ) if primes[prime]]
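# Quick sanity check (not part of the original file):
# prime_sieve_eratosthenes(10) -> [2, 3, 5, 7]
# prime_sieve_eratosthenes(20) -> [2, 3, 5, 7, 11, 13, 17, 19]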
if __name__ == "__main__":
import doctest
doctest.testmod()
a_ : Dict = int(input("Enter a positive integer: ").strip())
print(prime_sieve_eratosthenes(user_num))
| 673 |
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
a_ : List[Any] = logging.get_logger("transformers.models.speecht5")
def __lowerCAmelCase ( _UpperCamelCase : Tuple , _UpperCamelCase : Tuple , _UpperCamelCase : Any ) -> Dict:
    '''Copy the original HiFi-GAN generator weights (weight-norm g/v pairs and biases) into the HF model.'''
hf_model.apply_weight_norm()
SCREAMING_SNAKE_CASE = checkpoint['input_conv.weight_g']
SCREAMING_SNAKE_CASE = checkpoint['input_conv.weight_v']
SCREAMING_SNAKE_CASE = checkpoint['input_conv.bias']
for i in range(len(config.upsample_rates ) ):
SCREAMING_SNAKE_CASE = checkpoint[f"""upsamples.{i}.1.weight_g"""]
SCREAMING_SNAKE_CASE = checkpoint[f"""upsamples.{i}.1.weight_v"""]
SCREAMING_SNAKE_CASE = checkpoint[f"""upsamples.{i}.1.bias"""]
for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
for j in range(len(config.resblock_dilation_sizes ) ):
SCREAMING_SNAKE_CASE = checkpoint[f"""blocks.{i}.convs1.{j}.1.weight_g"""]
SCREAMING_SNAKE_CASE = checkpoint[f"""blocks.{i}.convs1.{j}.1.weight_v"""]
SCREAMING_SNAKE_CASE = checkpoint[f"""blocks.{i}.convs1.{j}.1.bias"""]
SCREAMING_SNAKE_CASE = checkpoint[f"""blocks.{i}.convs2.{j}.1.weight_g"""]
SCREAMING_SNAKE_CASE = checkpoint[f"""blocks.{i}.convs2.{j}.1.weight_v"""]
SCREAMING_SNAKE_CASE = checkpoint[f"""blocks.{i}.convs2.{j}.1.bias"""]
SCREAMING_SNAKE_CASE = checkpoint['output_conv.1.weight_g']
SCREAMING_SNAKE_CASE = checkpoint['output_conv.1.weight_v']
SCREAMING_SNAKE_CASE = checkpoint['output_conv.1.bias']
hf_model.remove_weight_norm()
@torch.no_grad()
def __lowerCAmelCase ( _UpperCamelCase : Optional[int] , _UpperCamelCase : Dict , _UpperCamelCase : Tuple , _UpperCamelCase : Any=None , _UpperCamelCase : List[str]=None , ) -> Tuple:
    '''Build a SpeechT5HifiGan model, load the original generator weights and feature stats, then save or push it.'''
if config_path is not None:
SCREAMING_SNAKE_CASE = SpeechTaHifiGanConfig.from_pretrained(_UpperCamelCase )
else:
SCREAMING_SNAKE_CASE = SpeechTaHifiGanConfig()
SCREAMING_SNAKE_CASE = SpeechTaHifiGan(_UpperCamelCase )
SCREAMING_SNAKE_CASE = torch.load(_UpperCamelCase )
load_weights(orig_checkpoint['model']['generator'] , _UpperCamelCase , _UpperCamelCase )
SCREAMING_SNAKE_CASE = np.load(_UpperCamelCase )
SCREAMING_SNAKE_CASE = stats[0].reshape(-1 )
SCREAMING_SNAKE_CASE = stats[1].reshape(-1 )
SCREAMING_SNAKE_CASE = torch.from_numpy(_UpperCamelCase ).float()
SCREAMING_SNAKE_CASE = torch.from_numpy(_UpperCamelCase ).float()
model.save_pretrained(_UpperCamelCase )
if repo_id:
print('Pushing to the hub...' )
model.push_to_hub(_UpperCamelCase )
if __name__ == "__main__":
a_ : Dict = argparse.ArgumentParser()
parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
parser.add_argument("--stats_path", required=True, default=None, type=str, help="Path to stats.npy file")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
a_ : Tuple = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
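# Example invocation (script name and paths are placeholders, not real files):
#
#     python convert_hifigan.py \
#         --checkpoint_path ./generator.ckpt \
#         --stats_path ./stats.npy \
#         --pytorch_dump_folder_path ./speecht5_hifigan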
| 673 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ : int = logging.get_logger(__name__)
a_ : str = {
"facebook/s2t-small-librispeech-asr": (
"https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json"
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
}
class UpperCamelCase ( SCREAMING_SNAKE_CASE ):
__UpperCamelCase ="speech_to_text"
__UpperCamelCase =["past_key_values"]
__UpperCamelCase ={"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__( self : int , snake_case__ : int=1_0_0_0_0 , snake_case__ : Union[str, Any]=1_2 , snake_case__ : Dict=2_0_4_8 , snake_case__ : Optional[Any]=4 , snake_case__ : Union[str, Any]=6 , snake_case__ : Optional[int]=2_0_4_8 , snake_case__ : Union[str, Any]=4 , snake_case__ : Optional[Any]=0.0 , snake_case__ : int=0.0 , snake_case__ : Tuple=True , snake_case__ : Dict=True , snake_case__ : Optional[int]="relu" , snake_case__ : List[str]=2_5_6 , snake_case__ : Union[str, Any]=0.1 , snake_case__ : str=0.0 , snake_case__ : Union[str, Any]=0.0 , snake_case__ : List[Any]=0.02 , snake_case__ : Optional[Any]=2 , snake_case__ : str=True , snake_case__ : Union[str, Any]=1 , snake_case__ : List[Any]=0 , snake_case__ : Dict=2 , snake_case__ : List[Any]=6_0_0_0 , snake_case__ : Optional[int]=1_0_2_4 , snake_case__ : List[Any]=2 , snake_case__ : Optional[Any]=(5, 5) , snake_case__ : Dict=1_0_2_4 , snake_case__ : Any=8_0 , snake_case__ : List[Any]=1 , **snake_case__ : List[Any] , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = vocab_size
SCREAMING_SNAKE_CASE = d_model
SCREAMING_SNAKE_CASE = encoder_ffn_dim
SCREAMING_SNAKE_CASE = encoder_layers
SCREAMING_SNAKE_CASE = encoder_attention_heads
SCREAMING_SNAKE_CASE = decoder_ffn_dim
SCREAMING_SNAKE_CASE = decoder_layers
SCREAMING_SNAKE_CASE = decoder_attention_heads
SCREAMING_SNAKE_CASE = dropout
SCREAMING_SNAKE_CASE = attention_dropout
SCREAMING_SNAKE_CASE = activation_dropout
SCREAMING_SNAKE_CASE = activation_function
SCREAMING_SNAKE_CASE = init_std
SCREAMING_SNAKE_CASE = encoder_layerdrop
SCREAMING_SNAKE_CASE = decoder_layerdrop
SCREAMING_SNAKE_CASE = use_cache
SCREAMING_SNAKE_CASE = encoder_layers
SCREAMING_SNAKE_CASE = scale_embedding # scale factor will be sqrt(d_model) if True
SCREAMING_SNAKE_CASE = max_source_positions
SCREAMING_SNAKE_CASE = max_target_positions
SCREAMING_SNAKE_CASE = num_conv_layers
SCREAMING_SNAKE_CASE = list(snake_case__ )
SCREAMING_SNAKE_CASE = conv_channels
SCREAMING_SNAKE_CASE = input_feat_per_channel
SCREAMING_SNAKE_CASE = input_channels
if len(self.conv_kernel_sizes ) != self.num_conv_layers:
raise ValueError(
'Configuration for convolutional module is incorrect. '
'It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` '
F"""but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes )}`, """
F"""`config.num_conv_layers = {self.num_conv_layers}`.""" )
super().__init__(
pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ , is_encoder_decoder=snake_case__ , decoder_start_token_id=snake_case__ , **snake_case__ , )
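    # Minimal usage sketch (assuming this mirrors transformers' Speech2TextConfig):
    #
    #     config = Speech2TextConfig(vocab_size=10000, num_conv_layers=2,
    #                                conv_kernel_sizes=(5, 5))
    #     assert len(config.conv_kernel_sizes) == config.num_conv_layers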
| 673 |
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
a_ : Optional[int] = logging.get_logger(__name__)
a_ : Union[str, Any] = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
a_ : Any = {
"vocab_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
},
"merges_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
},
"tokenizer_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
},
}
a_ : List[Any] = {
"allenai/led-base-16384": 1_6384,
}
class UpperCamelCase ( SCREAMING_SNAKE_CASE ):
__UpperCamelCase =VOCAB_FILES_NAMES
__UpperCamelCase =PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase =LEDTokenizer
__UpperCamelCase =["input_ids", "attention_mask"]
def __init__( self : Tuple , snake_case__ : List[Any]=None , snake_case__ : List[str]=None , snake_case__ : List[str]=None , snake_case__ : Dict="replace" , snake_case__ : Tuple="<s>" , snake_case__ : Optional[Any]="</s>" , snake_case__ : int="</s>" , snake_case__ : Dict="<s>" , snake_case__ : Union[str, Any]="<unk>" , snake_case__ : Optional[int]="<pad>" , snake_case__ : List[str]="<mask>" , snake_case__ : List[Any]=False , snake_case__ : int=True , **snake_case__ : Dict , ):
"""simple docstring"""
super().__init__(
snake_case__ , snake_case__ , tokenizer_file=snake_case__ , errors=snake_case__ , bos_token=snake_case__ , eos_token=snake_case__ , sep_token=snake_case__ , cls_token=snake_case__ , unk_token=snake_case__ , pad_token=snake_case__ , mask_token=snake_case__ , add_prefix_space=snake_case__ , trim_offsets=snake_case__ , **snake_case__ , )
SCREAMING_SNAKE_CASE = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space' , snake_case__ ) != add_prefix_space:
SCREAMING_SNAKE_CASE = getattr(snake_case__ , pre_tok_state.pop('type' ) )
SCREAMING_SNAKE_CASE = add_prefix_space
SCREAMING_SNAKE_CASE = pre_tok_class(**snake_case__ )
SCREAMING_SNAKE_CASE = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
SCREAMING_SNAKE_CASE = 'post_processor'
SCREAMING_SNAKE_CASE = getattr(self.backend_tokenizer , snake_case__ , snake_case__ )
if tokenizer_component_instance:
SCREAMING_SNAKE_CASE = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
SCREAMING_SNAKE_CASE = tuple(state['sep'] )
if "cls" in state:
SCREAMING_SNAKE_CASE = tuple(state['cls'] )
SCREAMING_SNAKE_CASE = False
if state.get('add_prefix_space' , snake_case__ ) != add_prefix_space:
SCREAMING_SNAKE_CASE = add_prefix_space
SCREAMING_SNAKE_CASE = True
if state.get('trim_offsets' , snake_case__ ) != trim_offsets:
SCREAMING_SNAKE_CASE = trim_offsets
SCREAMING_SNAKE_CASE = True
if changes_to_apply:
SCREAMING_SNAKE_CASE = getattr(snake_case__ , state.pop('type' ) )
SCREAMING_SNAKE_CASE = component_class(**snake_case__ )
setattr(self.backend_tokenizer , snake_case__ , snake_case__ )
@property
# Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
if self._mask_token is None:
if self.verbose:
logger.error('Using mask_token, but it is not set yet.' )
return None
return str(self._mask_token )
@mask_token.setter
def UpperCamelCase ( self : List[Any] , snake_case__ : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else value
SCREAMING_SNAKE_CASE = value
def UpperCamelCase ( self : Dict , *snake_case__ : Optional[Any] , **snake_case__ : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = kwargs.get('is_split_into_words' , snake_case__ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
'to use it with pretokenized inputs.' )
return super()._batch_encode_plus(*snake_case__ , **snake_case__ )
def UpperCamelCase ( self : List[str] , *snake_case__ : List[Any] , **snake_case__ : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = kwargs.get('is_split_into_words' , snake_case__ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
'to use it with pretokenized inputs.' )
return super()._encode_plus(*snake_case__ , **snake_case__ )
def UpperCamelCase ( self : Union[str, Any] , snake_case__ : str , snake_case__ : Optional[str] = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self._tokenizer.model.save(snake_case__ , name=snake_case__ )
return tuple(snake_case__ )
def UpperCamelCase ( self : List[str] , snake_case__ : int , snake_case__ : Tuple=None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def UpperCamelCase ( self : Optional[int] , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = [self.sep_token_id]
SCREAMING_SNAKE_CASE = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def UpperCamelCase ( self : Optional[Any] , snake_case__ : Union[Dict[str, EncodedInput], BatchEncoding] , snake_case__ : Optional[int] = None , snake_case__ : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , snake_case__ : Optional[int] = None , snake_case__ : Optional[bool] = None , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = super()._pad(
encoded_inputs=snake_case__ , max_length=snake_case__ , padding_strategy=snake_case__ , pad_to_multiple_of=snake_case__ , return_attention_mask=snake_case__ , )
# Load from model defaults
if return_attention_mask is None:
SCREAMING_SNAKE_CASE = 'attention_mask' in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
SCREAMING_SNAKE_CASE = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
SCREAMING_SNAKE_CASE = len(encoded_inputs['global_attention_mask'] ) != len(snake_case__ )
if needs_to_be_padded:
SCREAMING_SNAKE_CASE = len(snake_case__ ) - len(encoded_inputs['global_attention_mask'] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
SCREAMING_SNAKE_CASE = (
encoded_inputs['global_attention_mask'] + [-1] * difference
)
elif self.padding_side == "left":
SCREAMING_SNAKE_CASE = [-1] * difference + encoded_inputs[
'global_attention_mask'
]
else:
raise ValueError('Invalid padding strategy:' + str(self.padding_side ) )
return encoded_inputs
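    # Example of the global-attention padding rule (a sketch): with
    # padding_side == "right" and two positions to fill, [1, 0, 0] becomes
    # [1, 0, 0, -1, -1] -- -1 marks padded slots, while real positions keep
    # their 0 (local) / 1 (global) values.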
| 673 | 1 |
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
a_ : Optional[Any] = logging.get_logger(__name__)
class UpperCamelCase ( SCREAMING_SNAKE_CASE ):
__UpperCamelCase ="linear"
__UpperCamelCase ="cosine"
__UpperCamelCase ="cosine_with_restarts"
__UpperCamelCase ="polynomial"
__UpperCamelCase ="constant"
__UpperCamelCase ="constant_with_warmup"
__UpperCamelCase ="piecewise_constant"
def __lowerCAmelCase ( _UpperCamelCase : Optimizer , _UpperCamelCase : int = -1 ) -> int:
    '''Constant schedule: the lr multiplier is always 1.'''
return LambdaLR(_UpperCamelCase , lambda _UpperCamelCase : 1 , last_epoch=_UpperCamelCase )
def __lowerCAmelCase ( _UpperCamelCase : Optimizer , _UpperCamelCase : int , _UpperCamelCase : int = -1 ) -> Optional[Any]:
    '''Constant schedule after a linear warmup from 0 to the initial lr.'''
def lr_lambda(_UpperCamelCase : int ):
if current_step < num_warmup_steps:
return float(_UpperCamelCase ) / float(max(1.0 , _UpperCamelCase ) )
return 1.0
return LambdaLR(_UpperCamelCase , _UpperCamelCase , last_epoch=_UpperCamelCase )
def __lowerCAmelCase ( _UpperCamelCase : Optimizer , _UpperCamelCase : str , _UpperCamelCase : int = -1 ) -> List[Any]:
    '''Piecewise-constant schedule parsed from comma-separated "step:multiplier" rules plus a trailing default multiplier.'''
SCREAMING_SNAKE_CASE = {}
SCREAMING_SNAKE_CASE = step_rules.split(',' )
for rule_str in rule_list[:-1]:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = rule_str.split(':' )
SCREAMING_SNAKE_CASE = int(_UpperCamelCase )
SCREAMING_SNAKE_CASE = float(_UpperCamelCase )
SCREAMING_SNAKE_CASE = value
SCREAMING_SNAKE_CASE = float(rule_list[-1] )
def create_rules_function(_UpperCamelCase : Optional[int] , _UpperCamelCase : Union[str, Any] ):
def rule_func(_UpperCamelCase : int ) -> float:
SCREAMING_SNAKE_CASE = sorted(rules_dict.keys() )
for i, sorted_step in enumerate(_UpperCamelCase ):
if steps < sorted_step:
return rules_dict[sorted_steps[i]]
return last_lr_multiple
return rule_func
SCREAMING_SNAKE_CASE = create_rules_function(_UpperCamelCase , _UpperCamelCase )
return LambdaLR(_UpperCamelCase , _UpperCamelCase , last_epoch=_UpperCamelCase )
def __lowerCAmelCase ( _UpperCamelCase : Any , _UpperCamelCase : Tuple , _UpperCamelCase : List[str] , _UpperCamelCase : List[Any]=-1 ) -> List[str]:
    '''Linear warmup to the initial lr, then linear decay to 0 over the remaining training steps.'''
def lr_lambda(_UpperCamelCase : int ):
if current_step < num_warmup_steps:
return float(_UpperCamelCase ) / float(max(1 , _UpperCamelCase ) )
return max(
0.0 , float(num_training_steps - current_step ) / float(max(1 , num_training_steps - num_warmup_steps ) ) )
return LambdaLR(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
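# Worked values for the linear schedule (warmup=2, total=10): step 0 -> 0.0,
# step 1 -> 1/2 = 0.5, step 2 -> (10-2)/8 = 1.0, step 6 -> (10-6)/8 = 0.5,
# step 10 -> 0.0. The multiplier scales the optimizer's base lr.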
def __lowerCAmelCase ( _UpperCamelCase : Optimizer , _UpperCamelCase : int , _UpperCamelCase : int , _UpperCamelCase : float = 0.5 , _UpperCamelCase : int = -1 ) -> Dict:
    '''Linear warmup, then cosine decay from the initial lr down to 0.'''
def lr_lambda(_UpperCamelCase : int ):
if current_step < num_warmup_steps:
return float(_UpperCamelCase ) / float(max(1 , _UpperCamelCase ) )
SCREAMING_SNAKE_CASE = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * float(_UpperCamelCase ) * 2.0 * progress )) )
return LambdaLR(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
def __lowerCAmelCase ( _UpperCamelCase : Optimizer , _UpperCamelCase : int , _UpperCamelCase : int , _UpperCamelCase : int = 1 , _UpperCamelCase : int = -1 ) -> Optional[int]:
    '''Linear warmup, then cosine decay with num_cycles hard restarts.'''
def lr_lambda(_UpperCamelCase : Union[str, Any] ):
if current_step < num_warmup_steps:
return float(_UpperCamelCase ) / float(max(1 , _UpperCamelCase ) )
SCREAMING_SNAKE_CASE = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
if progress >= 1.0:
return 0.0
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * ((float(_UpperCamelCase ) * progress) % 1.0) )) )
return LambdaLR(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
def __lowerCAmelCase ( _UpperCamelCase : Optional[int] , _UpperCamelCase : List[Any] , _UpperCamelCase : Tuple , _UpperCamelCase : Union[str, Any]=1e-7 , _UpperCamelCase : Dict=1.0 , _UpperCamelCase : Tuple=-1 ) -> Dict:
    '''Linear warmup, then polynomial decay from the initial lr down to lr_end.'''
SCREAMING_SNAKE_CASE = optimizer.defaults['lr']
if not (lr_init > lr_end):
raise ValueError(f"""lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})""" )
def lr_lambda(_UpperCamelCase : int ):
if current_step < num_warmup_steps:
return float(_UpperCamelCase ) / float(max(1 , _UpperCamelCase ) )
elif current_step > num_training_steps:
return lr_end / lr_init # as LambdaLR multiplies by lr_init
else:
SCREAMING_SNAKE_CASE = lr_init - lr_end
SCREAMING_SNAKE_CASE = num_training_steps - num_warmup_steps
SCREAMING_SNAKE_CASE = 1 - (current_step - num_warmup_steps) / decay_steps
SCREAMING_SNAKE_CASE = lr_range * pct_remaining**power + lr_end
return decay / lr_init # as LambdaLR multiplies by lr_init
return LambdaLR(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
a_ : List[Any] = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def __lowerCAmelCase ( _UpperCamelCase : Union[str, SchedulerType] , _UpperCamelCase : Optimizer , _UpperCamelCase : Optional[str] = None , _UpperCamelCase : Optional[int] = None , _UpperCamelCase : Optional[int] = None , _UpperCamelCase : int = 1 , _UpperCamelCase : float = 1.0 , _UpperCamelCase : int = -1 , ) -> Optional[int]:
    '''Look up a schedule by name and build it, checking that the required arguments were provided.'''
SCREAMING_SNAKE_CASE = SchedulerType(_UpperCamelCase )
SCREAMING_SNAKE_CASE = TYPE_TO_SCHEDULER_FUNCTION[name]
if name == SchedulerType.CONSTANT:
return schedule_func(_UpperCamelCase , last_epoch=_UpperCamelCase )
if name == SchedulerType.PIECEWISE_CONSTANT:
return schedule_func(_UpperCamelCase , step_rules=_UpperCamelCase , last_epoch=_UpperCamelCase )
# All other schedulers require `num_warmup_steps`
if num_warmup_steps is None:
raise ValueError(f"""{name} requires `num_warmup_steps`, please provide that argument.""" )
if name == SchedulerType.CONSTANT_WITH_WARMUP:
return schedule_func(_UpperCamelCase , num_warmup_steps=_UpperCamelCase , last_epoch=_UpperCamelCase )
# All other schedulers require `num_training_steps`
if num_training_steps is None:
raise ValueError(f"""{name} requires `num_training_steps`, please provide that argument.""" )
if name == SchedulerType.COSINE_WITH_RESTARTS:
return schedule_func(
_UpperCamelCase , num_warmup_steps=_UpperCamelCase , num_training_steps=_UpperCamelCase , num_cycles=_UpperCamelCase , last_epoch=_UpperCamelCase , )
if name == SchedulerType.POLYNOMIAL:
return schedule_func(
_UpperCamelCase , num_warmup_steps=_UpperCamelCase , num_training_steps=_UpperCamelCase , power=_UpperCamelCase , last_epoch=_UpperCamelCase , )
return schedule_func(
_UpperCamelCase , num_warmup_steps=_UpperCamelCase , num_training_steps=_UpperCamelCase , last_epoch=_UpperCamelCase )
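# Minimal usage sketch (assuming this mirrors diffusers' get_scheduler; `model`
# below is hypothetical):
#
#     from torch.optim import AdamW
#     optimizer = AdamW(model.parameters(), lr=5e-5)
#     lr_scheduler = get_scheduler(
#         "linear", optimizer, num_warmup_steps=100, num_training_steps=1000
#     )
#     for step in range(1000):
#         ...  # forward/backward, optimizer.step(), optimizer.zero_grad()
#         lr_scheduler.step()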
| 673 |
#
# This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def __lowerCAmelCase ( *_UpperCamelCase : Optional[Any] ) -> Optional[int]:
    '''Print while holding an exclusive flock (on the script file) so output from concurrent ranks does not interleave.'''
with open(_UpperCamelCase , 'r' ) as fh:
fcntl.flock(_UpperCamelCase , fcntl.LOCK_EX )
try:
print(*_UpperCamelCase )
finally:
fcntl.flock(_UpperCamelCase , fcntl.LOCK_UN )
a_ : int = int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(local_rank)
a_ : str = torch.device("cuda", local_rank)
a_ : Optional[int] = socket.gethostname()
a_ : Union[str, Any] = F"""[{hostname}-{local_rank}]"""
try:
# test distributed
dist.init_process_group("nccl")
dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
dist.barrier()
# test cuda is available and can allocate memory
torch.cuda.is_available()
torch.ones(1).cuda(local_rank)
# global rank
a_ : Dict = dist.get_rank()
a_ : Any = dist.get_world_size()
printflock(F"""{gpu} is OK (global rank: {rank}/{world_size})""")
dist.barrier()
if rank == 0:
printflock(F"""pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}""")
except Exception:
printflock(F"""{gpu} is broken""")
raise
| 673 | 1 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
a_ : Any = "▁"
a_ : List[str] = {"vocab_file": "spiece.model"}
a_ : List[Any] = {
"vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"}
}
a_ : Any = {
"google/pegasus-xsum": 512,
}
a_ : Optional[int] = logging.get_logger(__name__)
class UpperCamelCase ( SCREAMING_SNAKE_CASE ):
    __UpperCamelCase =VOCAB_FILES_NAMES
__UpperCamelCase =PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase =["input_ids", "attention_mask"]
def __init__( self : Optional[int] , snake_case__ : Optional[Any] , snake_case__ : List[str]="<pad>" , snake_case__ : Union[str, Any]="</s>" , snake_case__ : int="<unk>" , snake_case__ : str="<mask_2>" , snake_case__ : Tuple="<mask_1>" , snake_case__ : Tuple=None , snake_case__ : Dict=1_0_3 , snake_case__ : Optional[Dict[str, Any]] = None , **snake_case__ : Dict , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = offset
if additional_special_tokens is not None:
if not isinstance(snake_case__ , snake_case__ ):
raise TypeError(
F"""additional_special_tokens should be of type {type(snake_case__ )}, but is"""
F""" {type(snake_case__ )}""" )
SCREAMING_SNAKE_CASE = (
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
else additional_special_tokens
)
# fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
additional_special_tokens_extended += [
F"""<unk_{i}>""" for i in range(len(snake_case__ ) , self.offset - 1 )
]
if len(set(snake_case__ ) ) != len(snake_case__ ):
raise ValueError(
'Please make sure that the provided additional_special_tokens do not contain an incorrectly'
F""" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.""" )
SCREAMING_SNAKE_CASE = additional_special_tokens_extended
else:
SCREAMING_SNAKE_CASE = [mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [F"""<unk_{i}>""" for i in range(2 , self.offset )]
SCREAMING_SNAKE_CASE = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=snake_case__ , unk_token=snake_case__ , mask_token=snake_case__ , pad_token=snake_case__ , mask_token_sent=snake_case__ , offset=snake_case__ , additional_special_tokens=snake_case__ , sp_model_kwargs=self.sp_model_kwargs , **snake_case__ , )
SCREAMING_SNAKE_CASE = mask_token_sent
SCREAMING_SNAKE_CASE = vocab_file
SCREAMING_SNAKE_CASE = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(snake_case__ )
# add special tokens to encoder dict
SCREAMING_SNAKE_CASE = {
0: self.pad_token,
1: self.eos_token,
}
if self.mask_token_sent is not None:
self.encoder.update(
{
2: self.mask_token_sent,
3: self.mask_token,
} )
if self.offset > 0:
# entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102
# mask_token_sent is already added to list -> so start at 1
self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1 , self.offset - 1 )} )
SCREAMING_SNAKE_CASE = {v: k for k, v in self.encoder.items()}
@property
def UpperCamelCase ( self : List[str] ):
"""simple docstring"""
return len(self.sp_model ) + self.offset
def UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = {self.convert_ids_to_tokens(snake_case__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.__dict__.copy()
SCREAMING_SNAKE_CASE = None
return state
def __setstate__( self : str , snake_case__ : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
SCREAMING_SNAKE_CASE = {}
SCREAMING_SNAKE_CASE = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def UpperCamelCase ( self : Any , snake_case__ : str ):
"""simple docstring"""
return self.sp_model.encode(snake_case__ , out_type=snake_case__ )
def UpperCamelCase ( self : Union[str, Any] , snake_case__ : str ):
"""simple docstring"""
if token in self.decoder:
return self.decoder[token]
elif token in self.added_tokens_decoder:
return self.added_tokens_decoder[token]
SCREAMING_SNAKE_CASE = self.sp_model.piece_to_id(snake_case__ )
return sp_id + self.offset
def UpperCamelCase ( self : Optional[int] , snake_case__ : int ):
"""simple docstring"""
if index in self.encoder:
return self.encoder[index]
elif index in self.added_tokens_encoder:
return self.added_tokens_encoder[index]
else:
SCREAMING_SNAKE_CASE = self.sp_model.IdToPiece(index - self.offset )
return token
def UpperCamelCase ( self : Tuple , snake_case__ : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = ''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(snake_case__ ) + token
SCREAMING_SNAKE_CASE = []
else:
current_sub_tokens.append(snake_case__ )
out_string += self.sp_model.decode(snake_case__ )
return out_string.strip()
def UpperCamelCase ( self : Any , snake_case__ : Any=False ):
"""simple docstring"""
return 1
def UpperCamelCase ( self : List[Any] , snake_case__ : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = set(self.all_special_ids ) # call it once instead of inside list comp
all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special
return [1 if x in all_special_ids else 0 for x in seq]
def UpperCamelCase ( self : Union[str, Any] , snake_case__ : List , snake_case__ : Optional[List] = None , snake_case__ : bool = False ):
"""simple docstring"""
if already_has_special_tokens:
return self._special_token_mask(snake_case__ )
elif token_ids_a is None:
return self._special_token_mask(snake_case__ ) + [1]
else:
return self._special_token_mask(token_ids_a + token_ids_a ) + [1]
def UpperCamelCase ( self : int , snake_case__ : Tuple , snake_case__ : Optional[int]=None ):
"""simple docstring"""
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
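    # Sketch: build_inputs_with_special_tokens([5, 6, 7]) -> [5, 6, 7, eos_token_id];
    # Pegasus adds no BOS, and a pair is simply concatenated before the single EOS.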
def UpperCamelCase ( self : Tuple , snake_case__ : str , snake_case__ : Optional[str] = None ):
"""simple docstring"""
if not os.path.isdir(snake_case__ ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
SCREAMING_SNAKE_CASE = os.path.join(
snake_case__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , snake_case__ )
elif not os.path.isfile(self.vocab_file ):
with open(snake_case__ , 'wb' ) as fi:
SCREAMING_SNAKE_CASE = self.sp_model.serialized_model_proto()
fi.write(snake_case__ )
return (out_vocab_file,)
| 673 |
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
a_ : Optional[Any] = logging.get_logger(__name__)
a_ : Optional[Any] = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
a_ : Any = {
"vocab_file": {
"allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json"
},
"merges_file": {
"allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt"
},
}
a_ : Union[str, Any] = {"allegro/herbert-base-cased": 514}
a_ : List[Any] = {}
class UpperCamelCase ( SCREAMING_SNAKE_CASE ):
__UpperCamelCase =VOCAB_FILES_NAMES
__UpperCamelCase =PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase =PRETRAINED_INIT_CONFIGURATION
__UpperCamelCase =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase =HerbertTokenizer
def __init__( self : Tuple , snake_case__ : Optional[Any]=None , snake_case__ : int=None , snake_case__ : Optional[int]=None , snake_case__ : str="<s>" , snake_case__ : Tuple="<unk>" , snake_case__ : List[str]="<pad>" , snake_case__ : Tuple="<mask>" , snake_case__ : Dict="</s>" , **snake_case__ : List[str] , ):
"""simple docstring"""
super().__init__(
snake_case__ , snake_case__ , tokenizer_file=snake_case__ , cls_token=snake_case__ , unk_token=snake_case__ , pad_token=snake_case__ , mask_token=snake_case__ , sep_token=snake_case__ , **snake_case__ , )
def UpperCamelCase ( self : Union[str, Any] , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = [self.cls_token_id]
SCREAMING_SNAKE_CASE = [self.sep_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def UpperCamelCase ( self : Optional[Any] , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None , snake_case__ : bool = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=snake_case__ , token_ids_a=snake_case__ , already_has_special_tokens=snake_case__ )
if token_ids_a is None:
return [1] + ([0] * len(snake_case__ )) + [1]
return [1] + ([0] * len(snake_case__ )) + [1] + ([0] * len(snake_case__ )) + [1]
def UpperCamelCase ( self : Optional[Any] , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = [self.sep_token_id]
SCREAMING_SNAKE_CASE = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
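    # Example of the segment ids (a sketch): for len(token_ids_a) == 2 and
    # len(token_ids_b) == 3 the result is [0, 0, 0, 0] + [1, 1, 1, 1], i.e.
    # cls + sequence A + sep get 0 and sequence B + sep get 1.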
def UpperCamelCase ( self : Tuple , snake_case__ : str , snake_case__ : Optional[str] = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self._tokenizer.model.save(snake_case__ , name=snake_case__ )
return tuple(snake_case__ )
| 673 | 1 |
import numpy
class UpperCamelCase :
def __init__( self : Union[str, Any] , snake_case__ : numpy.ndarray , snake_case__ : numpy.ndarray ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = input_array
# Random initial weights are assigned where first argument is the
# number of nodes in previous layer and second argument is the
# number of nodes in the next layer.
# Random initial weights are assigned.
# self.input_array.shape[1] is used to represent number of nodes in input layer.
# First hidden layer consists of 4 nodes.
SCREAMING_SNAKE_CASE = numpy.random.rand(
self.input_array.shape[1] , 4 )
# Random initial values for the first hidden layer.
# First hidden layer has 4 nodes.
# Second hidden layer has 3 nodes.
SCREAMING_SNAKE_CASE = numpy.random.rand(
4 , 3 )
# Random initial values for the second hidden layer.
# Second hidden layer has 3 nodes.
# Output layer has 1 node.
SCREAMING_SNAKE_CASE = numpy.random.rand(3 , 1 )
# Real output values provided.
SCREAMING_SNAKE_CASE = output_array
# Predicted output values by the neural network.
# Predicted_output array initially consists of zeroes.
SCREAMING_SNAKE_CASE = numpy.zeros(output_array.shape )
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = sigmoid(
numpy.dot(self.input_array , self.input_layer_and_first_hidden_layer_weights ) )
# layer_between_first_hidden_layer_and_second_hidden_layer is the layer
# connecting the first hidden set of nodes with the second hidden set of nodes.
SCREAMING_SNAKE_CASE = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
# layer_between_second_hidden_layer_and_output is the layer connecting
# second hidden layer with the output node.
SCREAMING_SNAKE_CASE = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return self.layer_between_second_hidden_layer_and_output
def UpperCamelCase ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer.T , 2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , )
SCREAMING_SNAKE_CASE = numpy.dot(
self.layer_between_input_and_first_hidden_layer.T , numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , )
SCREAMING_SNAKE_CASE = numpy.dot(
self.input_array.T , numpy.dot(
numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , self.first_hidden_layer_and_second_hidden_layer_weights.T , )
* sigmoid_derivative(self.layer_between_input_and_first_hidden_layer ) , )
self.input_layer_and_first_hidden_layer_weights += (
updated_input_layer_and_first_hidden_layer_weights
)
self.first_hidden_layer_and_second_hidden_layer_weights += (
updated_first_hidden_layer_and_second_hidden_layer_weights
)
self.second_hidden_layer_and_output_layer_weights += (
updated_second_hidden_layer_and_output_layer_weights
)
def UpperCamelCase ( self : Union[str, Any] , snake_case__ : numpy.ndarray , snake_case__ : int , snake_case__ : bool ):
"""simple docstring"""
for iteration in range(1 , iterations + 1 ):
SCREAMING_SNAKE_CASE = self.feedforward()
self.back_propagation()
if give_loss:
SCREAMING_SNAKE_CASE = numpy.mean(numpy.square(output - self.feedforward() ) )
print(F"""Iteration {iteration} Loss: {loss}""" )
    def predict(self, input_arr: numpy.ndarray) -> int:
        """
        Runs a single input through the trained network and thresholds the
        sigmoid output at 0.6 to produce a binary class label.
        """
        # Input values for which the prediction is to be made.
        self.array = input_arr
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.array, self.input_layer_and_first_hidden_layer_weights)
        )
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )
        return int(self.layer_between_second_hidden_layer_and_output > 0.6)
def sigmoid(value: numpy.ndarray) -> numpy.ndarray:
    """Applies the sigmoid activation function: 1 / (1 + e^(-x))."""
    return 1 / (1 + numpy.exp(-value))


def sigmoid_derivative(value: numpy.ndarray) -> numpy.ndarray:
    """Derivative of sigmoid, expressed in terms of the sigmoid output itself."""
    return (value) * (1 - (value))
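# Editor's sketch (not part of the original module): a quick numerical check of
# the derivative identity used in back propagation. Note that sigmoid_derivative
# above expects the *sigmoid output* s, not the raw input x, and returns s * (1 - s).
def _check_sigmoid_derivative(x: float = 0.5, eps: float = 1e-6) -> None:
    s = sigmoid(numpy.array(x))
    # Forward-difference approximation of d(sigmoid)/dx at x.
    numeric = (sigmoid(numpy.array(x + eps)) - s) / eps
    assert abs(numeric - sigmoid_derivative(s)) < 1e-4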
def example() -> int:
    """
    Trains the network on the 3-bit odd-parity truth table (a 3-input XOR)
    and returns the prediction for the input (1, 1, 1).
    """
    test_input = numpy.array(
        (
            [0, 0, 0],
            [0, 0, 1],
            [0, 1, 0],
            [0, 1, 1],
            [1, 0, 0],
            [1, 0, 1],
            [1, 1, 0],
            [1, 1, 1],
        ),
        dtype=numpy.float64,
    )
    # True output values for the given input values.
    output = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]), dtype=numpy.float64)
    # Calling neural network class.
    neural_network = TwoHiddenLayerNeuralNetwork(
        input_array=test_input, output_array=output
    )
    # Calling training function.
    # Set give_loss to True if you want to see loss in every iteration.
    neural_network.train(output=output, iterations=10, give_loss=False)
    return neural_network.predict(numpy.array(([1, 1, 1]), dtype=numpy.float64))
if __name__ == "__main__":
example()
| 673 |
def sum_of_digits(n: int) -> int:
    """
    Find the sum of digits of a number, iteratively.

    >>> sum_of_digits(12345)
    15
    >>> sum_of_digits(-123)
    6
    """
    n = abs(n)
    res = 0
    while n > 0:
        res += n % 10
        n //= 10
    return res


def sum_of_digits_recursion(n: int) -> int:
    """
    Find the sum of digits of a number using recursion.

    >>> sum_of_digits_recursion(12345)
    15
    """
    n = abs(n)
    return n if n < 10 else n % 10 + sum_of_digits_recursion(n // 10)


def sum_of_digits_compact(n: int) -> int:
    """
    Find the sum of digits of a number via its string representation.

    >>> sum_of_digits_compact(12345)
    15
    """
    return sum(int(c) for c in str(abs(n)))


def benchmark() -> None:
    """Benchmark the three digit-sum implementations on progressively larger inputs."""
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        print(f"{call:56} = {func(value)} -- {timing:.4f} seconds")

    for value in (262144, 1125899906842624, 1267650600228229401496703205376):
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func, value)
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
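# Editor's sketch (not in the original script): the three implementations agree
# on any integer input; e.g. in a REPL:
#
#   >>> all(sum_of_digits(n) == sum_of_digits_recursion(n) == sum_of_digits_compact(n)
#   ...     for n in (0, 7, -7, 12345, 2**50))
#   True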
| 673 | 1 |
import time
from contextlib import contextmanager
from pathlib import Path
import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder
CI_HUB_USER = "__DUMMY_TRANSFORMERS_USER__"
CI_HUB_USER_FULL_NAME = "Dummy User"
CI_HUB_USER_TOKEN = "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"

CI_HUB_ENDPOINT = "https://hub-ci.huggingface.co"
CI_HUB_DATASETS_URL = CI_HUB_ENDPOINT + "/datasets/{repo_id}/resolve/{revision}/{path}"
CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE = CI_HUB_ENDPOINT + "/{repo_id}/resolve/{revision}/{filename}"
CI_HUB_TOKEN_PATH = Path("~/.huggingface/hub_ci_token").expanduser()
@pytest.fixture
def ci_hfh_hf_hub_url(monkeypatch):
    monkeypatch.setattr(
        "huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE", CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE
    )


@pytest.fixture
def ci_hub_config(monkeypatch):
    monkeypatch.setattr("datasets.config.HF_ENDPOINT", CI_HUB_ENDPOINT)
    monkeypatch.setattr("datasets.config.HUB_DATASETS_URL", CI_HUB_DATASETS_URL)


@pytest.fixture
def ci_hub_token_path(monkeypatch):
    monkeypatch.setattr("huggingface_hub.hf_api.HfFolder.path_token", CI_HUB_TOKEN_PATH)


@pytest.fixture
def set_ci_hub_access_token(ci_hub_config, ci_hub_token_path):
    HfFolder.save_token(CI_HUB_USER_TOKEN)
    yield
    HfFolder.delete_token()
@pytest.fixture(scope="session")
def hf_api():
    return HfApi(endpoint=CI_HUB_ENDPOINT)


@pytest.fixture(scope="session")
def hf_token(hf_api: HfApi):
    previous_token = HfFolder.get_token()
    HfFolder.save_token(CI_HUB_USER_TOKEN)
    yield CI_HUB_USER_TOKEN
    if previous_token is not None:
        HfFolder.save_token(previous_token)


@pytest.fixture
def cleanup_repo(hf_api):
    def _cleanup_repo(repo_id):
        hf_api.delete_repo(repo_id, token=CI_HUB_USER_TOKEN, repo_type="dataset")

    return _cleanup_repo


@pytest.fixture
def temporary_repo(cleanup_repo):
    @contextmanager
    def _temporary_repo(repo_id):
        try:
            yield repo_id
        finally:
            cleanup_repo(repo_id)

    return _temporary_repo
@pytest.fixture(scope="session")
def hf_private_dataset_repo_txt_data_(hf_api: HfApi, hf_token, text_file):
    repo_name = f"repo_txt_data-{int(time.time() * 10e3)}"
    repo_id = f"{CI_HUB_USER}/{repo_name}"
    hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
    hf_api.upload_file(
        token=hf_token,
        path_or_fileobj=str(text_file),
        path_in_repo="data/text_data.txt",
        repo_id=repo_id,
        repo_type="dataset",
    )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass


@pytest.fixture()
def hf_private_dataset_repo_txt_data(hf_private_dataset_repo_txt_data_, ci_hub_config, ci_hfh_hf_hub_url):
    return hf_private_dataset_repo_txt_data_


@pytest.fixture(scope="session")
def hf_private_dataset_repo_zipped_txt_data_(hf_api: HfApi, hf_token, zip_csv_with_dir_path):
    repo_name = f"repo_zipped_txt_data-{int(time.time() * 10e3)}"
    repo_id = f"{CI_HUB_USER}/{repo_name}"
    hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
    hf_api.upload_file(
        token=hf_token,
        path_or_fileobj=str(zip_csv_with_dir_path),
        path_in_repo="data.zip",
        repo_id=repo_id,
        repo_type="dataset",
    )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass


@pytest.fixture()
def hf_private_dataset_repo_zipped_txt_data(
    hf_private_dataset_repo_zipped_txt_data_, ci_hub_config, ci_hfh_hf_hub_url
):
    return hf_private_dataset_repo_zipped_txt_data_


@pytest.fixture(scope="session")
def hf_private_dataset_repo_zipped_img_data_(hf_api: HfApi, hf_token, zip_image_path):
    repo_name = f"repo_zipped_img_data-{int(time.time() * 10e3)}"
    repo_id = f"{CI_HUB_USER}/{repo_name}"
    hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
    hf_api.upload_file(
        token=hf_token,
        path_or_fileobj=str(zip_image_path),
        path_in_repo="data.zip",
        repo_id=repo_id,
        repo_type="dataset",
    )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass


@pytest.fixture()
def hf_private_dataset_repo_zipped_img_data(
    hf_private_dataset_repo_zipped_img_data_, ci_hub_config, ci_hfh_hf_hub_url
):
    return hf_private_dataset_repo_zipped_img_data_
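# Editor's sketch of how a test consumes these fixtures (illustrative only; the
# test below is hypothetical and not part of this fixtures module). pytest
# injects each fixture by matching the parameter name, so the private repo is
# created, yielded as a repo_id string, and deleted automatically afterwards:
#
#   def test_load_private_text_dataset(hf_private_dataset_repo_txt_data, hf_token):
#       from datasets import load_dataset
#       ds = load_dataset(hf_private_dataset_repo_txt_data, use_auth_token=hf_token)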
| 673 |
# limitations under the License.
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipeline_utils import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
"pipelines_utils",
"0.22.0",
"Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.",
standard_warn=False,
stacklevel=3,
)
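# Editor's note: per the deprecation message above, downstream code should
# migrate to the subpackage import, e.g.:
#
#   from diffusers.pipelines.pipeline_utils import DiffusionPipeline, ImagePipelineOutput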
| 673 | 1 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

WAV2VEC2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/wav2vec2-base-960h": "https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json",
# See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2
}
class Wav2Vec2Config(PretrainedConfig):
    model_type = "wav2vec2"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="sum",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        add_adapter=False,
        adapter_kernel_size=3,
        adapter_stride=2,
        num_adapter_layers=3,
        output_hidden_size=None,
        adapter_attn_dim=None,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
F""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"""
F""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight
        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size
        self.adapter_attn_dim = adapter_attn_dim
        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim
    @property
    def inputs_to_logits_ratio(self):
        """Number of input samples consumed per output frame of the feature encoder."""
        return functools.reduce(operator.mul, self.conv_stride, 1)
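# Editor's sketch (not part of the original file): constructing a default config
# and reading the downsampling factor computed by the property above.
if __name__ == "__main__":
    config = Wav2Vec2Config()
    # conv_stride = (5, 2, 2, 2, 2, 2, 2), so 5 * 2**6 = 320 input samples per encoder frame.
    print(config.inputs_to_logits_ratio)  # 320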
| 673 |
import gc
import unittest
import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
    ClapTextConfig,
    ClapTextModelWithProjection,
    RobertaTokenizer,
    SpeechT5HifiGan,
    SpeechT5HifiGanConfig,
)
from diffusers import (
    AudioLDMPipeline,
    AutoencoderKL,
    DDIMScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class AudioLDMPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = AudioLDMPipeline
    params = TEXT_TO_AUDIO_PARAMS
    batch_params = TEXT_TO_AUDIO_BATCH_PARAMS
    required_optional_params = frozenset(
[
"num_inference_steps",
"num_waveforms_per_prompt",
"generator",
"latents",
"output_type",
"return_dict",
"callback",
"callback_steps",
] )
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=(32, 64),
            class_embed_type="simple_projection",
            projection_class_embeddings_input_dim=32,
            class_embeddings_concat=True,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=1,
            out_channels=1,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = ClapTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            projection_dim=32,
        )
        text_encoder = ClapTextModelWithProjection(text_encoder_config)
        tokenizer = RobertaTokenizer.from_pretrained("hf-internal-testing/tiny-random-roberta", model_max_length=77)

        vocoder_config = SpeechT5HifiGanConfig(
            model_in_dim=8,
            sampling_rate=16000,
            upsample_initial_channel=16,
            upsample_rates=[2, 2],
            upsample_kernel_sizes=[4, 4],
            resblock_kernel_sizes=[3, 7],
            resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]],
            normalize_before=False,
        )
        vocoder = SpeechT5HifiGan(vocoder_config)

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "vocoder": vocoder,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A hammer hitting a wooden surface",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
        }
        return inputs
    def test_audioldm_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator

        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = audioldm_pipe(**inputs)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) == 256

        audio_slice = audio[:10]
        expected_slice = np.array(
            [-0.0050, 0.0050, -0.0060, 0.0033, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0033]
        )
        assert np.abs(audio_slice - expected_slice).max() < 1e-2
    def test_audioldm_prompt_embeds(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = audioldm_pipe(**inputs)
        audio_1 = output.audios[0]

        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]

        text_inputs = audioldm_pipe.tokenizer(
            prompt,
            padding="max_length",
            max_length=audioldm_pipe.tokenizer.model_max_length,
            truncation=True,
            return_tensors="pt",
        )
        text_inputs = text_inputs["input_ids"].to(torch_device)

        prompt_embeds = audioldm_pipe.text_encoder(
            text_inputs,
        )
        prompt_embeds = prompt_embeds.text_embeds
        # additional L_2 normalization over each hidden-state
        prompt_embeds = F.normalize(prompt_embeds, dim=-1)

        inputs["prompt_embeds"] = prompt_embeds

        # forward
        output = audioldm_pipe(**inputs)
        audio_2 = output.audios[0]

        assert np.abs(audio_1 - audio_2).max() < 1e-2
    def test_audioldm_negative_prompt_embeds(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = audioldm_pipe(**inputs)
        audio_1 = output.audios[0]

        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]

        embeds = []
        for p in [prompt, negative_prompt]:
            text_inputs = audioldm_pipe.tokenizer(
                p,
                padding="max_length",
                max_length=audioldm_pipe.tokenizer.model_max_length,
                truncation=True,
                return_tensors="pt",
            )
            text_inputs = text_inputs["input_ids"].to(torch_device)

            text_embeds = audioldm_pipe.text_encoder(
                text_inputs,
            )
            text_embeds = text_embeds.text_embeds
            # additional L_2 normalization over each hidden-state
            text_embeds = F.normalize(text_embeds, dim=-1)

            embeds.append(text_embeds)

        prompt_embeds, negative_prompt_embeds = embeds
        inputs["prompt_embeds"] = prompt_embeds
        inputs["negative_prompt_embeds"] = negative_prompt_embeds

        # forward
        output = audioldm_pipe(**inputs)
        audio_2 = output.audios[0]

        assert np.abs(audio_1 - audio_2).max() < 1e-2
    def test_audioldm_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        negative_prompt = "egg cracking"
        output = audioldm_pipe(**inputs, negative_prompt=negative_prompt)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) == 256

        audio_slice = audio[:10]
        expected_slice = np.array(
            [-0.0051, 0.0050, -0.0060, 0.0034, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0032]
        )
        assert np.abs(audio_slice - expected_slice).max() < 1e-2
    def test_audioldm_num_waveforms_per_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        prompt = "A hammer hitting a wooden surface"

        # test num_waveforms_per_prompt=1 (default)
        audios = audioldm_pipe(prompt, num_inference_steps=2).audios

        assert audios.shape == (1, 256)

        # test num_waveforms_per_prompt=1 (default) for batch of prompts
        batch_size = 2
        audios = audioldm_pipe([prompt] * batch_size, num_inference_steps=2).audios

        assert audios.shape == (batch_size, 256)

        # test num_waveforms_per_prompt for single prompt
        num_waveforms_per_prompt = 2
        audios = audioldm_pipe(
            prompt, num_inference_steps=2, num_waveforms_per_prompt=num_waveforms_per_prompt
        ).audios

        assert audios.shape == (num_waveforms_per_prompt, 256)

        # test num_waveforms_per_prompt for batch of prompts
        batch_size = 2
        audios = audioldm_pipe(
            [prompt] * batch_size, num_inference_steps=2, num_waveforms_per_prompt=num_waveforms_per_prompt
        ).audios

        assert audios.shape == (batch_size * num_waveforms_per_prompt, 256)
    def test_audioldm_audio_length_in_s(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        vocoder_sampling_rate = audioldm_pipe.vocoder.config.sampling_rate

        inputs = self.get_dummy_inputs(device)
        output = audioldm_pipe(audio_length_in_s=0.016, **inputs)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) / vocoder_sampling_rate == 0.016

        output = audioldm_pipe(audio_length_in_s=0.032, **inputs)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) / vocoder_sampling_rate == 0.032
    def test_audioldm_vocoder_model_in_dim(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        prompt = ["hey"]

        output = audioldm_pipe(prompt, num_inference_steps=1)
        audio_shape = output.audios.shape
        assert audio_shape == (1, 256)

        config = audioldm_pipe.vocoder.config
        config.model_in_dim *= 2
        audioldm_pipe.vocoder = SpeechT5HifiGan(config).to(torch_device)
        output = audioldm_pipe(prompt, num_inference_steps=1)
        audio_shape = output.audios.shape
        # waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
        assert audio_shape == (1, 256)
    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(test_mean_pixel_difference=False)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False)
@slow
class AudioLDMPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 8, 128, 16))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "A hammer hitting a wooden surface",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 2.5,
        }
        return inputs
    def test_audioldm(self):
        audioldm_pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm")
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        inputs["num_inference_steps"] = 25
        audio = audioldm_pipe(**inputs).audios[0]

        assert audio.ndim == 1
        assert len(audio) == 81920

        audio_slice = audio[77230:77240]
        expected_slice = np.array(
            [-0.4884, -0.4607, 0.0023, 0.5007, 0.5896, 0.5151, 0.3813, -0.0208, -0.3687, -0.4315]
        )
        max_diff = np.abs(expected_slice - audio_slice).max()
        assert max_diff < 1e-2
    def test_audioldm_lms(self):
        audioldm_pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm")
        audioldm_pipe.scheduler = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        audio = audioldm_pipe(**inputs).audios[0]

        assert audio.ndim == 1
        assert len(audio) == 81920

        audio_slice = audio[27780:27790]
        expected_slice = np.array(
            [-0.2131, -0.0873, -0.0124, -0.0189, 0.0569, 0.1373, 0.1883, 0.2886, 0.3297, 0.2212]
        )
        max_diff = np.abs(expected_slice - audio_slice).max()
        assert max_diff < 3e-2
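# Editor's sketch of the end-to-end usage these slow tests exercise (illustrative
# only; requires downloading the `cvssp/audioldm` checkpoint, and the 16 kHz rate
# is the vocoder's configured sampling rate):
#
#   import scipy.io.wavfile
#   pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm")
#   audio = pipe("A hammer hitting a wooden surface", num_inference_steps=10).audios[0]
#   scipy.io.wavfile.write("hammer.wav", rate=16000, data=audio)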
| 673 | 1 |
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/dpr-ctx_encoder-single-nq-base": 512,
"facebook/dpr-ctx_encoder-multiset-base": 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/dpr-question_encoder-single-nq-base": 512,
"facebook/dpr-question_encoder-multiset-base": 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/dpr-reader-single-nq-base": 512,
"facebook/dpr-reader-multiset-base": 512,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
"facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
"facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}
class DPRContextEncoderTokenizerFast(BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRContextEncoderTokenizer


class DPRQuestionEncoderTokenizerFast(BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRQuestionEncoderTokenizer
DPRSpanPrediction = collections.namedtuple(
    "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)

DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])
CUSTOM_DPR_READER_DOCSTRING = R"\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. Acceptable values are:\n\n - `'tf'`: Return TensorFlow `tf.constant` objects.\n - `'pt'`: Return PyTorch `torch.Tensor` objects.\n - `'np'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer's default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Return:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n "
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    def __call__(
        self,
        questions,
        titles: Optional[str] = None,
        texts: Optional[str] = None,
        padding: Union[bool, str] = False,
        truncation: Union[bool, str] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchEncoding:
        if titles is None and texts is None:
            return super().__call__(
                questions,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions,
                text_pair,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        assert len(titles) == len(
            texts
        ), f"There should be as many titles than texts but got {len(titles)} titles and {len(texts)} texts."
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"]
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)
    def decode_best_spans(
        self,
        reader_input: BatchEncoding,
        reader_output: DPRReaderOutput,
        num_spans: int = 16,
        max_answer_length: int = 64,
        num_spans_per_passage: int = 4,
    ) -> List[DPRSpanPrediction]:
        """
        Get the span predictions for the extractive Q&A model, sorted by descending relevance score.
        """
        input_ids = reader_input["input_ids"]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)

            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len],
                end_logits=end_logits[doc_id][passage_offset:sequence_len],
                max_answer_length=max_answer_length,
                top_spans=num_spans_per_passage,
            )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index],
                        relevance_score=relevance_logits[doc_id],
                        doc_id=doc_id,
                        start_index=start_index,
                        end_index=end_index,
                        text=self.decode(sequence_ids[start_index : end_index + 1]),
                    )
                )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]
    def _get_best_spans(
        self,
        start_logits: List[int],
        end_logits: List[int],
        max_answer_length: int,
        top_spans: int,
    ):
        """
        Finds the best answer spans for one passage: every (start, end) pair within
        `max_answer_length` is scored, then non-overlapping spans are kept in
        descending score order.
        """
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            assert start_index <= end_index, f"Wrong span indices: [{start_index}:{end_index}]"
            length = end_index - start_index + 1
            assert length <= max_answer_length, f"Span is too long: {length} > {max_answer_length}"
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals
            ):
                continue
            chosen_span_intervals.append((start_index, end_index))

            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizerFast(CustomDPRReaderTokenizerMixin, BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DPRReaderTokenizer
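# Editor's sketch (illustrative): encoding reader inputs and decoding the best
# spans with the classes above, assuming a fine-tuned DPR reader checkpoint:
#
#   from transformers import DPRReader, DPRReaderTokenizerFast
#   tokenizer = DPRReaderTokenizerFast.from_pretrained("facebook/dpr-reader-single-nq-base")
#   model = DPRReader.from_pretrained("facebook/dpr-reader-single-nq-base")
#   encoded = tokenizer(
#       questions=["What is love?"],
#       titles=["Haddaway"],
#       texts=["'What Is Love' is a song recorded by the artist Haddaway"],
#       return_tensors="pt",
#   )
#   outputs = model(**encoded)
#   predicted_spans = tokenizer.decode_best_spans(encoded, outputs)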
| 673 |
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class OpenLlamaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return OpenLlamaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
            use_stable_embedding=True,
        )
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = OpenLlamaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = OpenLlamaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_causal_lm(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        model = OpenLlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_decoder_model_past_large_inputs(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = OpenLlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice, output_from_past_slice, atol=1e-3))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class OpenLlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
    )
    all_generative_model_classes = (OpenLlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenLlamaModel,
            "text-classification": OpenLlamaForSequenceClassification,
            "text-generation": OpenLlamaForCausalLM,
            "zero-shot": OpenLlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False
    def setUp(self):
        self.model_tester = OpenLlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenLlamaConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)
    def test_open_llama_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_open_llama_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_open_llama_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    @unittest.skip("Open-Llama buffers include complex numbers, which breaks this test")
    def test_save_load_fast_init_from_base(self):
        pass
    @parameterized.expand([("linear",), ("dynamic",)])
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = OpenLlamaModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = OpenLlamaModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))
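    # Editor's note (illustrative): the parameterized test above covers the two
    # supported RoPE scaling strategies, set on the config as, e.g.:
    #
    #   config.rope_scaling = {"type": "linear", "factor": 10.0}   # interpolate all positions
    #   config.rope_scaling = {"type": "dynamic", "factor": 10.0}  # rescale only past the original max length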
| 673 | 1 |
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class UpperCamelCase :
def __init__( self : Dict , snake_case__ : str , snake_case__ : str=1_3 , snake_case__ : Tuple=7 , snake_case__ : Tuple=True , snake_case__ : Tuple=True , snake_case__ : List[str]=False , snake_case__ : Any=True , snake_case__ : Union[str, Any]=9_9 , snake_case__ : Dict=3_2 , snake_case__ : Optional[Any]=5 , snake_case__ : Optional[Any]=4 , snake_case__ : Union[str, Any]=3_7 , snake_case__ : Tuple="gelu" , snake_case__ : Dict=0.1 , snake_case__ : Any=0.1 , snake_case__ : int=5_1_2 , snake_case__ : Dict=1_6 , snake_case__ : str=2 , snake_case__ : Any=0.02 , snake_case__ : List[str]=3 , snake_case__ : int=4 , snake_case__ : List[str]=None , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = parent
SCREAMING_SNAKE_CASE = batch_size
SCREAMING_SNAKE_CASE = seq_length
SCREAMING_SNAKE_CASE = is_training
SCREAMING_SNAKE_CASE = use_input_mask
SCREAMING_SNAKE_CASE = use_token_type_ids
SCREAMING_SNAKE_CASE = use_labels
SCREAMING_SNAKE_CASE = vocab_size
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = intermediate_size
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = hidden_dropout_prob
SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE = max_position_embeddings
SCREAMING_SNAKE_CASE = type_vocab_size
SCREAMING_SNAKE_CASE = type_sequence_label_size
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = num_labels
SCREAMING_SNAKE_CASE = num_choices
SCREAMING_SNAKE_CASE = scope
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
if self.use_labels:
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_choices )
SCREAMING_SNAKE_CASE = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase ( self : Dict ):
"""simple docstring"""
return OpenLlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=snake_case__ , initializer_range=self.initializer_range , use_stable_embedding=snake_case__ , )
def UpperCamelCase ( self : int , snake_case__ : Tuple , snake_case__ : str , snake_case__ : Optional[Any] , snake_case__ : Union[str, Any] , snake_case__ : List[str] , snake_case__ : Any , snake_case__ : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = OpenLlamaModel(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ )
SCREAMING_SNAKE_CASE = model(snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase ( self : Optional[int] , snake_case__ : Dict , snake_case__ : List[Any] , snake_case__ : str , snake_case__ : str , snake_case__ : int , snake_case__ : Union[str, Any] , snake_case__ : Optional[int] , snake_case__ : List[Any] , snake_case__ : str , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = OpenLlamaModel(snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(
snake_case__ , attention_mask=snake_case__ , encoder_hidden_states=snake_case__ , encoder_attention_mask=snake_case__ , )
SCREAMING_SNAKE_CASE = model(
snake_case__ , attention_mask=snake_case__ , encoder_hidden_states=snake_case__ , )
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase ( self : Dict , snake_case__ : Union[str, Any] , snake_case__ : int , snake_case__ : int , snake_case__ : List[str] , snake_case__ : Optional[Any] , snake_case__ : Optional[Any] , snake_case__ : Optional[int] , snake_case__ : Optional[int] , snake_case__ : Tuple , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = OpenLlamaForCausalLM(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase ( self : Optional[int] , snake_case__ : Union[str, Any] , snake_case__ : List[Any] , snake_case__ : Any , snake_case__ : int , snake_case__ : Any , snake_case__ : Union[str, Any] , snake_case__ : int , snake_case__ : str , snake_case__ : Union[str, Any] , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = OpenLlamaForCausalLM(config=snake_case__ )
model.to(snake_case__ )
model.eval()
# first forward pass
SCREAMING_SNAKE_CASE = model(
snake_case__ , attention_mask=snake_case__ , encoder_hidden_states=snake_case__ , encoder_attention_mask=snake_case__ , use_cache=snake_case__ , )
SCREAMING_SNAKE_CASE = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
SCREAMING_SNAKE_CASE = ids_tensor((self.batch_size, 3) , config.vocab_size )
SCREAMING_SNAKE_CASE = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
SCREAMING_SNAKE_CASE = torch.cat([input_ids, next_tokens] , dim=-1 )
SCREAMING_SNAKE_CASE = torch.cat([input_mask, next_mask] , dim=-1 )
SCREAMING_SNAKE_CASE = model(
snake_case__ , attention_mask=snake_case__ , encoder_hidden_states=snake_case__ , encoder_attention_mask=snake_case__ , output_hidden_states=snake_case__ , )['hidden_states'][0]
SCREAMING_SNAKE_CASE = model(
snake_case__ , attention_mask=snake_case__ , encoder_hidden_states=snake_case__ , encoder_attention_mask=snake_case__ , past_key_values=snake_case__ , output_hidden_states=snake_case__ , )['hidden_states'][0]
# select random slice
SCREAMING_SNAKE_CASE = ids_tensor((1,) , output_from_past.shape[-1] ).item()
SCREAMING_SNAKE_CASE = output_from_no_past[:, -3:, random_slice_idx].detach()
SCREAMING_SNAKE_CASE = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(snake_case__ , snake_case__ , atol=1E-3 ) )
def UpperCamelCase ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
        SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = config_and_inputs
SCREAMING_SNAKE_CASE = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class UpperCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ):
__UpperCamelCase =(
(OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
)
__UpperCamelCase =(OpenLlamaForCausalLM,) if is_torch_available() else ()
__UpperCamelCase =(
{
"feature-extraction": OpenLlamaModel,
"text-classification": OpenLlamaForSequenceClassification,
"text-generation": OpenLlamaForCausalLM,
"zero-shot": OpenLlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
__UpperCamelCase =False
__UpperCamelCase =False
def UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = OpenLlamaModelTester(self )
SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=snake_case__ , hidden_size=3_7 )
def UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCamelCase ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case__ )
def UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
SCREAMING_SNAKE_CASE = type
self.model_tester.create_and_check_model(*snake_case__ )
def UpperCamelCase ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE = 3
SCREAMING_SNAKE_CASE = input_dict['input_ids']
SCREAMING_SNAKE_CASE = input_ids.ne(1 ).to(snake_case__ )
SCREAMING_SNAKE_CASE = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
SCREAMING_SNAKE_CASE = OpenLlamaForSequenceClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ , labels=snake_case__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE = 3
SCREAMING_SNAKE_CASE = 'single_label_classification'
SCREAMING_SNAKE_CASE = input_dict['input_ids']
SCREAMING_SNAKE_CASE = input_ids.ne(1 ).to(snake_case__ )
SCREAMING_SNAKE_CASE = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
SCREAMING_SNAKE_CASE = OpenLlamaForSequenceClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ , labels=snake_case__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE = 3
SCREAMING_SNAKE_CASE = 'multi_label_classification'
SCREAMING_SNAKE_CASE = input_dict['input_ids']
SCREAMING_SNAKE_CASE = input_ids.ne(1 ).to(snake_case__ )
SCREAMING_SNAKE_CASE = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
SCREAMING_SNAKE_CASE = OpenLlamaForSequenceClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ , labels=snake_case__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip('Open-Llama buffers include complex numbers, which breaks this test' )
def UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
pass
@parameterized.expand([('linear',), ('dynamic',)] )
def UpperCamelCase ( self : str , snake_case__ : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE = ids_tensor([1, 1_0] , config.vocab_size )
SCREAMING_SNAKE_CASE = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights
SCREAMING_SNAKE_CASE = OpenLlamaModel(snake_case__ )
original_model.to(snake_case__ )
original_model.eval()
SCREAMING_SNAKE_CASE = original_model(snake_case__ ).last_hidden_state
SCREAMING_SNAKE_CASE = original_model(snake_case__ ).last_hidden_state
set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights
SCREAMING_SNAKE_CASE = {'type': scaling_type, 'factor': 10.0}
SCREAMING_SNAKE_CASE = OpenLlamaModel(snake_case__ )
scaled_model.to(snake_case__ )
scaled_model.eval()
SCREAMING_SNAKE_CASE = scaled_model(snake_case__ ).last_hidden_state
SCREAMING_SNAKE_CASE = scaled_model(snake_case__ ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(snake_case__ , snake_case__ , atol=1E-5 ) )
else:
self.assertFalse(torch.allclose(snake_case__ , snake_case__ , atol=1E-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(snake_case__ , snake_case__ , atol=1E-5 ) )
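# Note on the rope-scaling test above (added commentary): linear scaling rescales
# position ids for every input, so even the short sequence diverges from the
# unscaled model, while dynamic NTK scaling only changes the RoPE base once the
# input exceeds the original max_position_embeddings -- which is exactly the
# split encoded by the assertTrue/assertFalse checks.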
| 673 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class UpperCamelCase ( SCREAMING_SNAKE_CASE ):
__UpperCamelCase ="openai/whisper-base"
__UpperCamelCase =(
"This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
"transcribed text."
)
__UpperCamelCase ="transcriber"
__UpperCamelCase =WhisperProcessor
__UpperCamelCase =WhisperForConditionalGeneration
__UpperCamelCase =["audio"]
__UpperCamelCase =["text"]
def UpperCamelCase ( self : Dict , snake_case__ : Tuple ):
"""simple docstring"""
return self.pre_processor(snake_case__ , return_tensors='pt' ).input_features
def UpperCamelCase ( self : Optional[int] , snake_case__ : Tuple ):
"""simple docstring"""
return self.model.generate(inputs=snake_case__ )
def UpperCamelCase ( self : str , snake_case__ : Union[str, Any] ):
"""simple docstring"""
return self.pre_processor.batch_decode(snake_case__ , skip_special_tokens=snake_case__ )[0]
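# Minimal sketch (added) of the same encode -> forward -> decode steps performed
# directly with the Whisper classes this tool wraps. `audio_array` stands in for
# a 16 kHz mono waveform and is hypothetical, not defined in this file.
def _transcribe_sketch(audio_array ):
    processor = WhisperProcessor.from_pretrained('openai/whisper-base' )
    model = WhisperForConditionalGeneration.from_pretrained('openai/whisper-base' )
    features = processor(audio_array , sampling_rate=1_6_0_0_0 , return_tensors='pt' ).input_features
    ids = model.generate(inputs=features )
    return processor.batch_decode(ids , skip_special_tokens=True )[0]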
| 673 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ : Optional[int] = logging.get_logger(__name__)
a_ : Any = {
"MIT/ast-finetuned-audioset-10-10-0.4593": (
"https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json"
),
}
class UpperCamelCase ( SCREAMING_SNAKE_CASE ):
__UpperCamelCase ="audio-spectrogram-transformer"
def __init__( self : Tuple , snake_case__ : str=7_6_8 , snake_case__ : int=1_2 , snake_case__ : List[str]=1_2 , snake_case__ : Tuple=3_0_7_2 , snake_case__ : List[Any]="gelu" , snake_case__ : Any=0.0 , snake_case__ : Optional[int]=0.0 , snake_case__ : List[str]=0.02 , snake_case__ : Optional[Any]=1E-12 , snake_case__ : List[Any]=1_6 , snake_case__ : Optional[Any]=True , snake_case__ : Tuple=1_0 , snake_case__ : Any=1_0 , snake_case__ : Optional[int]=1_0_2_4 , snake_case__ : Dict=1_2_8 , **snake_case__ : str , ):
"""simple docstring"""
super().__init__(**snake_case__ )
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = intermediate_size
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = hidden_dropout_prob
SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = layer_norm_eps
SCREAMING_SNAKE_CASE = patch_size
SCREAMING_SNAKE_CASE = qkv_bias
SCREAMING_SNAKE_CASE = frequency_stride
SCREAMING_SNAKE_CASE = time_stride
SCREAMING_SNAKE_CASE = max_length
SCREAMING_SNAKE_CASE = num_mel_bins
| 673 |
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
a_ : List[str] = version.parse(importlib_metadata.version("nltk"))
if NLTK_VERSION >= version.Version("3.6.4"):
from nltk import word_tokenize
a_ : Dict = "\\n@inproceedings{banarjee2005,\n title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n author = {Banerjee, Satanjeev and Lavie, Alon},\n booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n month = jun,\n year = {2005},\n address = {Ann Arbor, Michigan},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W05-0909},\n pages = {65--72},\n}\n"
a_ : str = "\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n"
a_ : int = "\nComputes METEOR score of translated segments against one or more references.\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n gamma: Relative weight assigned to fragmentation penalty. default: 0.5\nReturns:\n 'meteor': meteor score.\nExamples:\n\n >>> meteor = datasets.load_metric('meteor')\n >>> predictions = [\"It is a guide to action which ensures that the military always obeys the commands of the party\"]\n >>> references = [\"It is a guide to action that ensures that the military will forever heed Party commands\"]\n >>> results = meteor.compute(predictions=predictions, references=references)\n >>> print(round(results[\"meteor\"], 4))\n 0.6944\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCamelCase ( datasets.Metric ):
def UpperCamelCase ( self : str ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Value('string' , id='sequence' ),
} ) , codebase_urls=['https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py'] , reference_urls=[
'https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score',
'https://en.wikipedia.org/wiki/METEOR',
] , )
def UpperCamelCase ( self : Dict , snake_case__ : int ):
"""simple docstring"""
import nltk
nltk.download('wordnet' )
if NLTK_VERSION >= version.Version('3.6.5' ):
nltk.download('punkt' )
if NLTK_VERSION >= version.Version('3.6.6' ):
nltk.download('omw-1.4' )
def UpperCamelCase ( self : Union[str, Any] , snake_case__ : str , snake_case__ : List[Any] , snake_case__ : List[Any]=0.9 , snake_case__ : Optional[Any]=3 , snake_case__ : Any=0.5 ):
"""simple docstring"""
if NLTK_VERSION >= version.Version('3.6.5' ):
SCREAMING_SNAKE_CASE = [
meteor_score.single_meteor_score(
word_tokenize(snake_case__ ) , word_tokenize(snake_case__ ) , alpha=snake_case__ , beta=snake_case__ , gamma=snake_case__ )
for ref, pred in zip(snake_case__ , snake_case__ )
]
else:
SCREAMING_SNAKE_CASE = [
meteor_score.single_meteor_score(snake_case__ , snake_case__ , alpha=snake_case__ , beta=snake_case__ , gamma=snake_case__ )
for ref, pred in zip(snake_case__ , snake_case__ )
]
return {"meteor": np.mean(snake_case__ )}
| 673 | 1 |
import warnings
from functools import wraps
from typing import Callable
def __lowerCAmelCase ( fn : Callable ) -> Callable:
    '''Decorator that marks ``fn`` as experimental by emitting a UserWarning on each call.'''

    @wraps(fn )
    def _inner_fn(*args , **kwargs ):
        warnings.warn(
            (f"""'{fn.__name__}' is experimental and might be subject to breaking changes in the future.""") , UserWarning , )
        return fn(*args , **kwargs )

    return _inner_fn
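# Illustrative usage of the decorator above (added sketch; `_hypothetical_api`
# is made up for demonstration and not part of the original module):
@__lowerCAmelCase
def _hypothetical_api(x : int ) -> int:
    return x * 2


# Calling `_hypothetical_api(3)` returns 6 after first emitting a UserWarning
# that the function is experimental.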
| 673 |
import numpy as np
def sigmoid( vector : np.ndarray ) -> np.ndarray:
    '''Element-wise logistic sigmoid: 1 / (1 + e^-x).'''
    return 1 / (1 + np.exp(-vector ))


def swish( vector : np.ndarray ) -> np.ndarray:
    '''Element-wise swish activation: x * sigmoid(x).'''
    return vector * sigmoid(vector )
if __name__ == "__main__":
import doctest
doctest.testmod()
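# Illustrative usage (added sketch; assumes the names fixed above). The swish
# activation is x * sigmoid(x): it vanishes at 0 and approaches x for large x.
def _activation_demo() -> None:
    x = np.array([-1.0, 0.0, 1.0] )
    print(sigmoid(x ) )  # approximately [0.269 0.5   0.731]
    print(swish(x ) )  # approximately [-0.269  0.     0.731]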
| 673 | 1 |
import logging
import os
from typing import List, Tuple
import numpy as np
import psutil
import torch
import torch.distributed as dist
from transformers import RagRetriever
a_ : Union[str, Any] = logging.getLogger(__name__)
class UpperCamelCase ( SCREAMING_SNAKE_CASE ):
def __init__( self : Tuple , snake_case__ : int , snake_case__ : Union[str, Any] , snake_case__ : Union[str, Any] , snake_case__ : Tuple=None ):
"""simple docstring"""
super().__init__(
snake_case__ , question_encoder_tokenizer=snake_case__ , generator_tokenizer=snake_case__ , index=snake_case__ , init_retrieval=snake_case__ , )
SCREAMING_SNAKE_CASE = None
def UpperCamelCase ( self : Optional[int] , snake_case__ : int ):
"""simple docstring"""
logger.info('initializing retrieval' )
# initializing a separate process group for retrieval as the default
# nccl backend doesn't support gather/scatter operations while gloo
# is too slow to replace nccl for the core gpu communication
if dist.is_initialized():
logger.info('dist initialized' )
# needs to be set manually
SCREAMING_SNAKE_CASE = self._infer_socket_ifname()
# avoid clash with the NCCL port
SCREAMING_SNAKE_CASE = str(distributed_port + 1 )
SCREAMING_SNAKE_CASE = dist.new_group(ranks=snake_case__ , backend='gloo' )
# initialize retriever only on the main worker
if not dist.is_initialized() or self._is_main():
logger.info('dist not initialized / main' )
self.index.init_index()
# all processes wait untill the retriever is initialized by the main process
if dist.is_initialized():
torch.distributed.barrier(group=self.process_group )
def UpperCamelCase ( self : Any ):
"""simple docstring"""
return dist.get_rank(group=self.process_group ) == 0
def UpperCamelCase ( self : Any , snake_case__ : Tuple , snake_case__ : int , snake_case__ : List[Any]=torch.floataa ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = torch.empty(snake_case__ , dtype=snake_case__ )
dist.scatter(snake_case__ , src=0 , scatter_list=snake_case__ , group=self.process_group )
return target_tensor
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = psutil.net_if_addrs()
# a hacky way to deal with varying network interface names
SCREAMING_SNAKE_CASE = next((addr for addr in addrs if addr.startswith('e' )) , snake_case__ )
return ifname
def UpperCamelCase ( self : List[str] , snake_case__ : np.ndarray , snake_case__ : int ):
"""simple docstring"""
if not dist.is_initialized():
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self._main_retrieve(snake_case__ , snake_case__ )
return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(snake_case__ )
# distributed training
SCREAMING_SNAKE_CASE = dist.get_world_size(group=self.process_group )
# gather logic
SCREAMING_SNAKE_CASE = None
if self._is_main():
SCREAMING_SNAKE_CASE = [torch.empty(question_hidden_states.shape , dtype=torch.floataa ) for _ in range(snake_case__ )]
dist.gather(torch.tensor(snake_case__ ) , dst=0 , gather_list=snake_case__ , group=self.process_group )
# scatter logic
SCREAMING_SNAKE_CASE = question_hidden_states.shape[0]
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = []
if self._is_main():
assert len(snake_case__ ) == world_size
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self._main_retrieve(torch.cat(snake_case__ ).numpy() , snake_case__ )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = torch.tensor(snake_case__ ), torch.tensor(snake_case__ )
SCREAMING_SNAKE_CASE = self._chunk_tensor(snake_case__ , snake_case__ )
SCREAMING_SNAKE_CASE = self._chunk_tensor(snake_case__ , snake_case__ )
SCREAMING_SNAKE_CASE = self._scattered(snake_case__ , [n_queries, n_docs] , target_type=torch.intaa )
SCREAMING_SNAKE_CASE = self._scattered(snake_case__ , [n_queries, n_docs, question_hidden_states.shape[1]] )
return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(snake_case__ )
| 673 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
a_ : Any = logging.get_logger(__name__)
a_ : Dict = {
"Visual-Attention-Network/van-base": (
"https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json"
),
}
class UpperCamelCase ( SCREAMING_SNAKE_CASE ):
__UpperCamelCase ="van"
def __init__( self : Optional[Any] , snake_case__ : Tuple=2_2_4 , snake_case__ : Dict=3 , snake_case__ : Union[str, Any]=[7, 3, 3, 3] , snake_case__ : str=[4, 2, 2, 2] , snake_case__ : Optional[Any]=[6_4, 1_2_8, 3_2_0, 5_1_2] , snake_case__ : Optional[Any]=[3, 3, 1_2, 3] , snake_case__ : Tuple=[8, 8, 4, 4] , snake_case__ : Any="gelu" , snake_case__ : Dict=0.02 , snake_case__ : List[str]=1E-6 , snake_case__ : int=1E-2 , snake_case__ : Any=0.0 , snake_case__ : Tuple=0.0 , **snake_case__ : Any , ):
"""simple docstring"""
super().__init__(**snake_case__ )
SCREAMING_SNAKE_CASE = image_size
SCREAMING_SNAKE_CASE = num_channels
SCREAMING_SNAKE_CASE = patch_sizes
SCREAMING_SNAKE_CASE = strides
SCREAMING_SNAKE_CASE = hidden_sizes
SCREAMING_SNAKE_CASE = depths
SCREAMING_SNAKE_CASE = mlp_ratios
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = layer_norm_eps
SCREAMING_SNAKE_CASE = layer_scale_init_value
SCREAMING_SNAKE_CASE = drop_path_rate
SCREAMING_SNAKE_CASE = dropout_rate
| 673 | 1 |
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import VideoMAEConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEModel,
)
from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class UpperCamelCase :
def __init__( self : Optional[int] , snake_case__ : List[str] , snake_case__ : Union[str, Any]=1_3 , snake_case__ : Optional[int]=1_0 , snake_case__ : List[Any]=3 , snake_case__ : List[str]=2 , snake_case__ : List[str]=2 , snake_case__ : Union[str, Any]=2 , snake_case__ : Union[str, Any]=True , snake_case__ : int=True , snake_case__ : int=3_2 , snake_case__ : Optional[Any]=5 , snake_case__ : Optional[Any]=4 , snake_case__ : str=3_7 , snake_case__ : str="gelu" , snake_case__ : Optional[int]=0.1 , snake_case__ : int=0.1 , snake_case__ : Any=1_0 , snake_case__ : List[Any]=0.02 , snake_case__ : Union[str, Any]=0.9 , snake_case__ : List[Any]=None , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = parent
SCREAMING_SNAKE_CASE = batch_size
SCREAMING_SNAKE_CASE = image_size
SCREAMING_SNAKE_CASE = num_channels
SCREAMING_SNAKE_CASE = patch_size
SCREAMING_SNAKE_CASE = tubelet_size
SCREAMING_SNAKE_CASE = num_frames
SCREAMING_SNAKE_CASE = is_training
SCREAMING_SNAKE_CASE = use_labels
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = intermediate_size
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = hidden_dropout_prob
SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE = type_sequence_label_size
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = mask_ratio
SCREAMING_SNAKE_CASE = scope
# in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame
SCREAMING_SNAKE_CASE = (image_size // patch_size) ** 2
SCREAMING_SNAKE_CASE = (num_frames // tubelet_size) * self.num_patches_per_frame
# use this variable to define bool_masked_pos
SCREAMING_SNAKE_CASE = int(mask_ratio * self.seq_length )
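        # Worked example (added): with the defaults above, num_patches_per_frame =
        # (10 // 2)**2 = 25, seq_length = (2 // 2) * 25 = 25 and, at
        # mask_ratio 0.9, num_masks = int(0.9 * 25) = 22.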
def UpperCamelCase ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE = None
if self.use_labels:
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE = self.get_config()
return config, pixel_values, labels
def UpperCamelCase ( self : List[str] ):
"""simple docstring"""
return VideoMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , tubelet_size=self.tubelet_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=snake_case__ , initializer_range=self.initializer_range , )
def UpperCamelCase ( self : Any , snake_case__ : List[str] , snake_case__ : Optional[Any] , snake_case__ : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = VideoMAEModel(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase ( self : Dict , snake_case__ : Optional[int] , snake_case__ : Optional[int] , snake_case__ : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = VideoMAEForPreTraining(snake_case__ )
model.to(snake_case__ )
model.eval()
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
SCREAMING_SNAKE_CASE = torch.ones((self.num_masks,) )
SCREAMING_SNAKE_CASE = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0 ) )] )
SCREAMING_SNAKE_CASE = mask.expand(self.batch_size , -1 ).bool()
SCREAMING_SNAKE_CASE = model(snake_case__ , snake_case__ )
# model only returns predictions for masked patches
SCREAMING_SNAKE_CASE = mask.sum().item()
SCREAMING_SNAKE_CASE = 3 * self.tubelet_size * self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_masked_patches, decoder_num_labels) )
def UpperCamelCase ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = config_and_inputs
SCREAMING_SNAKE_CASE = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class UpperCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ):
__UpperCamelCase =(
(VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else ()
)
__UpperCamelCase =(
{"feature-extraction": VideoMAEModel, "video-classification": VideoMAEForVideoClassification}
if is_torch_available()
else {}
)
__UpperCamelCase =False
__UpperCamelCase =False
__UpperCamelCase =False
__UpperCamelCase =False
def UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = VideoMAEModelTester(self )
SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=snake_case__ , has_text_modality=snake_case__ , hidden_size=3_7 )
def UpperCamelCase ( self : Optional[Any] , snake_case__ : int , snake_case__ : Dict , snake_case__ : Optional[int]=False ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = copy.deepcopy(snake_case__ )
if model_class == VideoMAEForPreTraining:
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
SCREAMING_SNAKE_CASE = torch.ones((self.model_tester.num_masks,) )
SCREAMING_SNAKE_CASE = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0 ) )] )
SCREAMING_SNAKE_CASE = mask.expand(self.model_tester.batch_size , -1 ).bool()
SCREAMING_SNAKE_CASE = bool_masked_pos.to(snake_case__ )
if return_labels:
if model_class in [
*get_values(snake_case__ ),
]:
SCREAMING_SNAKE_CASE = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=snake_case__ )
return inputs_dict
def UpperCamelCase ( self : List[str] ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='VideoMAE does not use inputs_embeds' )
def UpperCamelCase ( self : Tuple ):
"""simple docstring"""
pass
def UpperCamelCase ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE = model_class(snake_case__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
SCREAMING_SNAKE_CASE = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(snake_case__ , nn.Linear ) )
def UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE = model_class(snake_case__ )
SCREAMING_SNAKE_CASE = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE = ['pixel_values']
self.assertListEqual(arg_names[:1] , snake_case__ )
def UpperCamelCase ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case__ )
def UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*snake_case__ )
@slow
def UpperCamelCase ( self : int ):
"""simple docstring"""
for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE = VideoMAEModel.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
def UpperCamelCase ( self : Tuple ):
"""simple docstring"""
if not self.has_attentions:
pass
else:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE = True
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE = self.model_tester.seq_length - self.model_tester.num_masks
SCREAMING_SNAKE_CASE = (
num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
)
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = model_class(snake_case__ )
model.to(snake_case__ )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(**self._prepare_for_class(snake_case__ , snake_case__ ) )
SCREAMING_SNAKE_CASE = outputs.attentions
self.assertEqual(len(snake_case__ ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = model_class(snake_case__ )
model.to(snake_case__ )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(**self._prepare_for_class(snake_case__ , snake_case__ ) )
SCREAMING_SNAKE_CASE = outputs.attentions
self.assertEqual(len(snake_case__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
SCREAMING_SNAKE_CASE = len(snake_case__ )
# Check attention is always last and order is fine
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = model_class(snake_case__ )
model.to(snake_case__ )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(**self._prepare_for_class(snake_case__ , snake_case__ ) )
self.assertEqual(out_len + 1 , len(snake_case__ ) )
SCREAMING_SNAKE_CASE = outputs.attentions
self.assertEqual(len(snake_case__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def UpperCamelCase ( self : Dict ):
"""simple docstring"""
def check_hidden_states_output(snake_case__ : Union[str, Any] , snake_case__ : int , snake_case__ : str ):
SCREAMING_SNAKE_CASE = model_class(snake_case__ )
model.to(snake_case__ )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(**self._prepare_for_class(snake_case__ , snake_case__ ) )
SCREAMING_SNAKE_CASE = outputs.hidden_states
SCREAMING_SNAKE_CASE = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(snake_case__ ) , snake_case__ )
SCREAMING_SNAKE_CASE = self.model_tester.seq_length - self.model_tester.num_masks
SCREAMING_SNAKE_CASE = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE = True
check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE = True
check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
pass
def __lowerCAmelCase ( ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = hf_hub_download(
repo_id='hf-internal-testing/spaghetti-video' , filename='eating_spaghetti.npy' , repo_type='dataset' )
SCREAMING_SNAKE_CASE = np.load(_UpperCamelCase )
return list(_UpperCamelCase )
@require_torch
@require_vision
class UpperCamelCase ( unittest.TestCase ):
@cached_property
def UpperCamelCase ( self : int ):
"""simple docstring"""
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def UpperCamelCase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = VideoMAEForVideoClassification.from_pretrained('MCG-NJU/videomae-base-finetuned-kinetics' ).to(
snake_case__ )
SCREAMING_SNAKE_CASE = self.default_image_processor
SCREAMING_SNAKE_CASE = prepare_video()
SCREAMING_SNAKE_CASE = image_processor(snake_case__ , return_tensors='pt' ).to(snake_case__ )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(**snake_case__ )
# verify the logits
SCREAMING_SNAKE_CASE = torch.Size((1, 4_0_0) )
self.assertEqual(outputs.logits.shape , snake_case__ )
SCREAMING_SNAKE_CASE = torch.tensor([0.3_669, -0.0_688, -0.2_421] ).to(snake_case__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , snake_case__ , atol=1E-4 ) )
@slow
def UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = VideoMAEForPreTraining.from_pretrained('MCG-NJU/videomae-base-short' ).to(snake_case__ )
SCREAMING_SNAKE_CASE = self.default_image_processor
SCREAMING_SNAKE_CASE = prepare_video()
SCREAMING_SNAKE_CASE = image_processor(snake_case__ , return_tensors='pt' ).to(snake_case__ )
# add boolean mask, indicating which patches to mask
SCREAMING_SNAKE_CASE = hf_hub_download(repo_id='hf-internal-testing/bool-masked-pos' , filename='bool_masked_pos.pt' )
SCREAMING_SNAKE_CASE = torch.load(snake_case__ )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(**snake_case__ )
# verify the logits
SCREAMING_SNAKE_CASE = torch.Size([1, 1_4_0_8, 1_5_3_6] )
SCREAMING_SNAKE_CASE = torch.tensor(
[[0.7_994, 0.9_612, 0.8_508], [0.7_401, 0.8_958, 0.8_302], [0.5_862, 0.7_468, 0.7_325]] , device=snake_case__ )
self.assertEqual(outputs.logits.shape , snake_case__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , snake_case__ , atol=1E-4 ) )
# verify the loss (`config.norm_pix_loss` = `True`)
SCREAMING_SNAKE_CASE = torch.tensor([0.5_142] , device=snake_case__ )
self.assertTrue(torch.allclose(outputs.loss , snake_case__ , atol=1E-4 ) )
# verify the loss (`config.norm_pix_loss` = `False`)
SCREAMING_SNAKE_CASE = VideoMAEForPreTraining.from_pretrained('MCG-NJU/videomae-base-short' , norm_pix_loss=snake_case__ ).to(
snake_case__ )
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(**snake_case__ )
        SCREAMING_SNAKE_CASE = torch.tensor([0.6_469] , device=snake_case__ )
self.assertTrue(torch.allclose(outputs.loss , snake_case__ , atol=1E-4 ) )
| 673 |
def __lowerCAmelCase ( sentence : str , ngram_size : int ) -> list[str]:
    '''Return every contiguous run of ``ngram_size`` characters in ``sentence``.'''
    return [sentence[i : i + ngram_size] for i in range(len(sentence ) - ngram_size + 1 )]
if __name__ == "__main__":
from doctest import testmod
testmod()
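# Illustrative usage (added): size-3 character n-grams of a short string.
assert __lowerCAmelCase('abcde' , 3 ) == ['abc', 'bcd', 'cde']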
| 673 | 1 |
a_ : Union[str, Any] = "Tobias Carryer"
from time import time
class LinearCongruentialGenerator :
    def __init__( self : Optional[Any] , multiplier : int , increment : int , modulo : int , seed : int=int(time() ) ):  # noqa: B008
        """Store the LCG parameters and the starting seed."""
        self.multiplier = multiplier
        self.increment = increment
        self.modulo = modulo
        self.seed = seed

    def next_number( self : Any ):
        """Advance the state: seed = (multiplier * seed + increment) mod modulo."""
        self.seed = (self.multiplier * self.seed + self.increment) % self.modulo
        return self.seed
if __name__ == "__main__":
# Show the LCG in action.
    lcg = LinearCongruentialGenerator(166_4525, 10_1390_4223, 2 << 31)
while True:
print(lcg.next_number())
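# Note (added): multiplier 166_4525, increment 10_1390_4223 and modulo
# 2 << 31 == 2**32 in the demo above are the classic Numerical Recipes LCG
# constants, which give the generator a full period of 2**32.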
| 673 |
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
a_ : Dict = logging.get_logger(__name__)
@add_end_docstrings(SCREAMING_SNAKE_CASE )
class UpperCamelCase ( SCREAMING_SNAKE_CASE ):
def __init__( self : Any , **snake_case__ : Optional[int] ):
"""simple docstring"""
super().__init__(**snake_case__ )
if self.framework == "tf":
raise ValueError(F"""The {self.__class__} is only available in PyTorch.""" )
requires_backends(self , 'vision' )
self.check_model_type(snake_case__ )
def __call__( self : List[Any] , snake_case__ : Union[str, "Image.Image", List[Dict[str, Any]]] , snake_case__ : Union[str, List[str]] = None , **snake_case__ : Union[str, Any] , ):
"""simple docstring"""
if "text_queries" in kwargs:
SCREAMING_SNAKE_CASE = kwargs.pop('text_queries' )
if isinstance(snake_case__ , (str, Image.Image) ):
SCREAMING_SNAKE_CASE = {'image': image, 'candidate_labels': candidate_labels}
else:
SCREAMING_SNAKE_CASE = image
SCREAMING_SNAKE_CASE = super().__call__(snake_case__ , **snake_case__ )
return results
def UpperCamelCase ( self : Union[str, Any] , **snake_case__ : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = {}
if "threshold" in kwargs:
SCREAMING_SNAKE_CASE = kwargs['threshold']
if "top_k" in kwargs:
SCREAMING_SNAKE_CASE = kwargs['top_k']
return {}, {}, postprocess_params
def UpperCamelCase ( self : List[Any] , snake_case__ : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = load_image(inputs['image'] )
SCREAMING_SNAKE_CASE = inputs['candidate_labels']
if isinstance(snake_case__ , snake_case__ ):
SCREAMING_SNAKE_CASE = candidate_labels.split(',' )
SCREAMING_SNAKE_CASE = torch.tensor([[image.height, image.width]] , dtype=torch.intaa )
for i, candidate_label in enumerate(snake_case__ ):
SCREAMING_SNAKE_CASE = self.tokenizer(snake_case__ , return_tensors=self.framework )
SCREAMING_SNAKE_CASE = self.image_processor(snake_case__ , return_tensors=self.framework )
yield {
"is_last": i == len(snake_case__ ) - 1,
"target_size": target_size,
"candidate_label": candidate_label,
**text_inputs,
**image_features,
}
def UpperCamelCase ( self : Any , snake_case__ : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = model_inputs.pop('target_size' )
SCREAMING_SNAKE_CASE = model_inputs.pop('candidate_label' )
SCREAMING_SNAKE_CASE = model_inputs.pop('is_last' )
SCREAMING_SNAKE_CASE = self.model(**snake_case__ )
SCREAMING_SNAKE_CASE = {'target_size': target_size, 'candidate_label': candidate_label, 'is_last': is_last, **outputs}
return model_outputs
def UpperCamelCase ( self : Union[str, Any] , snake_case__ : Tuple , snake_case__ : str=0.1 , snake_case__ : Union[str, Any]=None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = []
for model_output in model_outputs:
SCREAMING_SNAKE_CASE = model_output['candidate_label']
SCREAMING_SNAKE_CASE = BaseModelOutput(snake_case__ )
SCREAMING_SNAKE_CASE = self.image_processor.post_process_object_detection(
outputs=snake_case__ , threshold=snake_case__ , target_sizes=model_output['target_size'] )[0]
for index in outputs["scores"].nonzero():
SCREAMING_SNAKE_CASE = outputs['scores'][index].item()
SCREAMING_SNAKE_CASE = self._get_bounding_box(outputs['boxes'][index][0] )
SCREAMING_SNAKE_CASE = {'score': score, 'label': label, 'box': box}
results.append(snake_case__ )
        SCREAMING_SNAKE_CASE = sorted(snake_case__ , key=lambda x : x['score'] , reverse=True )
if top_k:
SCREAMING_SNAKE_CASE = results[:top_k]
return results
def UpperCamelCase ( self : List[Any] , snake_case__ : "torch.Tensor" ):
"""simple docstring"""
if self.framework != "pt":
raise ValueError('The ZeroShotObjectDetectionPipeline is only available in PyTorch.' )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = box.int().tolist()
SCREAMING_SNAKE_CASE = {
'xmin': xmin,
'ymin': ymin,
'xmax': xmax,
'ymax': ymax,
}
return bbox
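# Illustrative usage through the high-level pipeline API (added sketch; the
# OWL-ViT checkpoint and COCO image URL are public examples, not mandated by
# this file):
#
#     from transformers import pipeline
#
#     detector = pipeline('zero-shot-object-detection' , model='google/owlvit-base-patch32' )
#     predictions = detector(
#         'http://images.cocodataset.org/val2017/000000039769.jpg' , candidate_labels=['cat', 'remote'] , )
#     # -> a list of {'score': ..., 'label': ..., 'box': {...}} dicts, as assembled in postprocess above.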
| 673 | 1 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class UpperCamelCase ( SCREAMING_SNAKE_CASE ):
__UpperCamelCase ="openai/whisper-base"
__UpperCamelCase =(
"This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
"transcribed text."
)
__UpperCamelCase ="transcriber"
__UpperCamelCase =WhisperProcessor
__UpperCamelCase =WhisperForConditionalGeneration
__UpperCamelCase =["audio"]
__UpperCamelCase =["text"]
def UpperCamelCase ( self : Dict , snake_case__ : Tuple ):
"""simple docstring"""
return self.pre_processor(snake_case__ , return_tensors='pt' ).input_features
def UpperCamelCase ( self : Optional[int] , snake_case__ : Tuple ):
"""simple docstring"""
return self.model.generate(inputs=snake_case__ )
def UpperCamelCase ( self : str , snake_case__ : Union[str, Any] ):
"""simple docstring"""
return self.pre_processor.batch_decode(snake_case__ , skip_special_tokens=snake_case__ )[0]
| 673 |
def solution( power : int = 10_00 ) -> int:
    '''Return the sum of the decimal digits of 2**power (Project Euler problem 16).'''
    num = 2**power
    list_num = list(str(num ) )
    sum_of_num = 0
    for i in list_num:
        sum_of_num += int(i )
    return sum_of_num
if __name__ == "__main__":
    power = int(input("Enter the power of 2: ").strip())
    print("2 ^ ", power, " = ", 2**power)
    result = solution(power)
    print("Sum of the digits is: ", result)
| 673 | 1 |
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
a_ : Tuple = logging.get_logger(__name__) # pylint: disable=invalid-name
a_ : str = "\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline\n >>> from diffusers.utils import load_image\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16\n ... )\n >>> pipe_prior.to(\"cuda\")\n\n >>> prompt = \"A red cartoon frog, 4k\"\n >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)\n\n >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(\n ... \"kandinsky-community/kandinsky-2-2-decoder\", torch_dtype=torch.float16\n ... )\n >>> pipe.to(\"cuda\")\n\n >>> init_image = load_image(\n ... \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"\n ... \"/kandinsky/frog.png\"\n ... )\n\n >>> image = pipe(\n ... image=init_image,\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... strength=0.2,\n ... ).images\n\n >>> image[0].save(\"red_frog.png\")\n ```\n"
def __lowerCAmelCase ( _UpperCamelCase : List[str] , _UpperCamelCase : Any , _UpperCamelCase : Any=8 ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
SCREAMING_SNAKE_CASE = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
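# Worked example (added): with the default scale_factor=8, the helper above maps
# height = width = 768 to 768 // 8**2 = 12 per side, then returns 12 * 8 = 96,
# the spatial size subsequently used for the latents.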
def __lowerCAmelCase ( _UpperCamelCase : Optional[int] , _UpperCamelCase : List[Any]=5_12 , _UpperCamelCase : Union[str, Any]=5_12 ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = pil_image.resize((w, h) , resample=Image.BICUBIC , reducing_gap=1 )
SCREAMING_SNAKE_CASE = np.array(pil_image.convert('RGB' ) )
SCREAMING_SNAKE_CASE = arr.astype(np.floataa ) / 1_27.5 - 1
SCREAMING_SNAKE_CASE = np.transpose(_UpperCamelCase , [2, 0, 1] )
SCREAMING_SNAKE_CASE = torch.from_numpy(_UpperCamelCase ).unsqueeze(0 )
return image
class UpperCamelCase ( SCREAMING_SNAKE_CASE ):
def __init__( self : int , snake_case__ : UNetaDConditionModel , snake_case__ : DDPMScheduler , snake_case__ : VQModel , ):
"""simple docstring"""
super().__init__()
self.register_modules(
unet=snake_case__ , scheduler=snake_case__ , movq=snake_case__ , )
SCREAMING_SNAKE_CASE = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def UpperCamelCase ( self : Any , snake_case__ : int , snake_case__ : List[Any] , snake_case__ : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = min(int(num_inference_steps * strength ) , snake_case__ )
SCREAMING_SNAKE_CASE = max(num_inference_steps - init_timestep , 0 )
SCREAMING_SNAKE_CASE = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def UpperCamelCase ( self : List[str] , snake_case__ : Dict , snake_case__ : int , snake_case__ : Optional[int] , snake_case__ : Optional[Any] , snake_case__ : Union[str, Any] , snake_case__ : Optional[int] , snake_case__ : str=None ):
"""simple docstring"""
if not isinstance(snake_case__ , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
F"""`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(snake_case__ )}""" )
SCREAMING_SNAKE_CASE = image.to(device=snake_case__ , dtype=snake_case__ )
SCREAMING_SNAKE_CASE = batch_size * num_images_per_prompt
if image.shape[1] == 4:
SCREAMING_SNAKE_CASE = image
else:
if isinstance(snake_case__ , snake_case__ ) and len(snake_case__ ) != batch_size:
raise ValueError(
F"""You have passed a list of generators of length {len(snake_case__ )}, but requested an effective batch"""
F""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
elif isinstance(snake_case__ , snake_case__ ):
SCREAMING_SNAKE_CASE = [
self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(snake_case__ )
]
SCREAMING_SNAKE_CASE = torch.cat(snake_case__ , dim=0 )
else:
SCREAMING_SNAKE_CASE = self.movq.encode(snake_case__ ).latent_dist.sample(snake_case__ )
SCREAMING_SNAKE_CASE = self.movq.config.scaling_factor * init_latents
SCREAMING_SNAKE_CASE = torch.cat([init_latents] , dim=0 )
SCREAMING_SNAKE_CASE = init_latents.shape
SCREAMING_SNAKE_CASE = randn_tensor(snake_case__ , generator=snake_case__ , device=snake_case__ , dtype=snake_case__ )
# get latents
SCREAMING_SNAKE_CASE = self.scheduler.add_noise(snake_case__ , snake_case__ , snake_case__ )
SCREAMING_SNAKE_CASE = init_latents
return latents
def UpperCamelCase ( self : int , snake_case__ : List[str]=0 ):
"""simple docstring"""
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`' )
SCREAMING_SNAKE_CASE = torch.device(F"""cuda:{gpu_id}""" )
SCREAMING_SNAKE_CASE = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(snake_case__ , snake_case__ )
def UpperCamelCase ( self : Union[str, Any] , snake_case__ : Optional[int]=0 ):
"""simple docstring"""
if is_accelerate_available() and is_accelerate_version('>=' , '0.17.0.dev0' ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.' )
SCREAMING_SNAKE_CASE = torch.device(F"""cuda:{gpu_id}""" )
if self.device.type != "cpu":
self.to('cpu' , silence_dtype_warnings=snake_case__ )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
SCREAMING_SNAKE_CASE = None
for cpu_offloaded_model in [self.unet, self.movq]:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = cpu_offload_with_hook(snake_case__ , snake_case__ , prev_module_hook=snake_case__ )
# We'll offload the last model manually.
SCREAMING_SNAKE_CASE = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def UpperCamelCase ( self : Dict ):
"""simple docstring"""
if not hasattr(self.unet , '_hf_hook' ):
return self.device
for module in self.unet.modules():
if (
hasattr(snake_case__ , '_hf_hook' )
and hasattr(module._hf_hook , 'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(snake_case__ )
def __call__( self : str , snake_case__ : Union[torch.FloatTensor, List[torch.FloatTensor]] , snake_case__ : Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]] , snake_case__ : Union[torch.FloatTensor, List[torch.FloatTensor]] , snake_case__ : int = 5_1_2 , snake_case__ : int = 5_1_2 , snake_case__ : int = 1_0_0 , snake_case__ : float = 4.0 , snake_case__ : float = 0.3 , snake_case__ : int = 1 , snake_case__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , snake_case__ : Optional[str] = "pil" , snake_case__ : bool = True , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self._execution_device
SCREAMING_SNAKE_CASE = guidance_scale > 1.0
if isinstance(snake_case__ , snake_case__ ):
SCREAMING_SNAKE_CASE = torch.cat(snake_case__ , dim=0 )
SCREAMING_SNAKE_CASE = image_embeds.shape[0]
if isinstance(snake_case__ , snake_case__ ):
SCREAMING_SNAKE_CASE = torch.cat(snake_case__ , dim=0 )
if do_classifier_free_guidance:
SCREAMING_SNAKE_CASE = image_embeds.repeat_interleave(snake_case__ , dim=0 )
SCREAMING_SNAKE_CASE = negative_image_embeds.repeat_interleave(snake_case__ , dim=0 )
SCREAMING_SNAKE_CASE = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=snake_case__ )
if not isinstance(snake_case__ , snake_case__ ):
image = [image]
if not all(isinstance(i , (PIL.Image.Image, torch.Tensor) ) for i in image ):
raise ValueError(
F"""Input is in incorrect format: {[type(i ) for i in image]}. Currently, we only support PIL image and pytorch tensor""" )
image = torch.cat([prepare_image(i , snake_case__ , snake_case__ ) for i in image] , dim=0 )
image = image.to(dtype=image_embeds.dtype , device=device )
latents = self.movq.encode(image )['latents']
latents = latents.repeat_interleave(snake_case__ , dim=0 )
self.scheduler.set_timesteps(snake_case__ , device=device )
timesteps , num_inference_steps = self.get_timesteps(snake_case__ , snake_case__ , device )
latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt )
height , width = downscale_height_and_width(snake_case__ , snake_case__ , self.movq_scale_factor )
latents = self.prepare_latents(
latents , latent_timestep , snake_case__ , snake_case__ , image_embeds.dtype , device , snake_case__ )
for i, t in enumerate(self.progress_bar(snake_case__ ) ):
# expand the latents if we are doing classifier free guidance
latent_model_input = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
added_cond_kwargs = {'image_embeds': image_embeds}
noise_pred = self.unet(
sample=latent_model_input , timestep=t , encoder_hidden_states=snake_case__ , added_cond_kwargs=added_cond_kwargs , return_dict=snake_case__ , )[0]
if do_classifier_free_guidance:
noise_pred , variance_pred = noise_pred.split(latents.shape[1] , dim=1 )
noise_pred_uncond , noise_pred_text = noise_pred.chunk(2 )
_ , variance_pred_text = variance_pred.chunk(2 )
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
noise_pred = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , 'variance_type' )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
noise_pred , _ = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
latents = self.scheduler.step(
noise_pred , t , latents , generator=snake_case__ , )[0]
# post-processing
image = self.movq.decode(latents , force_not_quantize=snake_case__ )['sample']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" )
if output_type in ["np", "pil"]:
image = image * 0.5 + 0.5
image = image.clamp(0 , 1 )
image = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
image = self.numpy_to_pil(image )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=image )
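# --- Illustrative, standalone sketch (not part of the pipeline above) ---
# A minimal demonstration of the classifier-free guidance update used in the
# denoising loop. `apply_cfg` is a hypothetical helper name; the tensor shapes
# and guidance scale below are assumptions chosen only for the demo.
import torch

def apply_cfg(noise_pred_uncond: torch.Tensor, noise_pred_text: torch.Tensor, guidance_scale: float) -> torch.Tensor:
    # push the prediction away from the unconditional estimate, toward the text-conditioned one
    return noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

# a doubled batch (unconditional + conditional) is split in half before guidance is applied
uncond, text = torch.randn(2, 4, 64, 64).chunk(2)
guided = apply_cfg(uncond, text, guidance_scale=4.0)
print(guided.shape)  # torch.Size([1, 4, 64, 64])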
| 673 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class UpperCamelCase ( SCREAMING_SNAKE_CASE ):
__UpperCamelCase ="facebook/bart-large-mnli"
__UpperCamelCase =(
"This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
"should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
"It returns the most likely label in the list of provided `labels` for the input text."
)
__UpperCamelCase ="text_classifier"
__UpperCamelCase =AutoTokenizer
__UpperCamelCase =AutoModelForSequenceClassification
__UpperCamelCase =["text", ["text"]]
__UpperCamelCase =["text"]
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
super().setup()
config = self.model.config
self.entailment_id = -1
for idx, label in config.id2label.items():
if label.lower().startswith('entail' ):
self.entailment_id = int(idx )
if self.entailment_id == -1:
raise ValueError('Could not determine the entailment ID from the model config, please pass it at init.' )
def UpperCamelCase ( self : Optional[Any] , snake_case__ : List[str] , snake_case__ : Dict ):
"""simple docstring"""
self._labels = labels
return self.pre_processor(
[text] * len(snake_case__ ) , [F"""This example is {label}""" for label in labels] , return_tensors='pt' , padding='max_length' , )
def UpperCamelCase ( self : Dict , snake_case__ : Dict ):
"""simple docstring"""
logits = outputs.logits
label_id = torch.argmax(logits[:, 2] ).item()
return self._labels[label_id]
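# --- Hedged, self-contained sketch of the NLI-based zero-shot recipe the tool
# above implements: pair the text with one "This example is {label}" hypothesis
# per label and pick the label whose entailment logit is highest. The sample text,
# labels, and the assumption that index 2 is the entailment class are illustrative.
from transformers import AutoModelForSequenceClassification, AutoTokenizer
import torch

tokenizer = AutoTokenizer.from_pretrained('facebook/bart-large-mnli')
model = AutoModelForSequenceClassification.from_pretrained('facebook/bart-large-mnli')
text, labels = 'The new GPU doubles training throughput.', ['sports', 'technology', 'politics']
inputs = tokenizer([text] * len(labels), [f'This example is {label}' for label in labels], return_tensors='pt', padding=True)
with torch.no_grad():
    logits = model(**inputs).logits
print(labels[torch.argmax(logits[:, 2]).item()])  # assumes logits column 2 is "entailment"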
| 673 | 1 |
from typing import Any, Dict, Optional
import torch
import torch.nn.functional as F
from torch import nn
from ..utils import maybe_allow_in_graph
from .activations import get_activation
from .attention_processor import Attention
from .embeddings import CombinedTimestepLabelEmbeddings
@maybe_allow_in_graph
class UpperCamelCase ( nn.Module ):
def __init__( self : Optional[int] , snake_case__ : int , snake_case__ : int , snake_case__ : int , snake_case__ : Optional[int]=0.0 , snake_case__ : Optional[int] = None , snake_case__ : str = "geglu" , snake_case__ : Optional[int] = None , snake_case__ : bool = False , snake_case__ : bool = False , snake_case__ : bool = False , snake_case__ : bool = False , snake_case__ : bool = True , snake_case__ : str = "layer_norm" , snake_case__ : bool = False , ):
"""simple docstring"""
super().__init__()
self.only_cross_attention = only_cross_attention
self.use_ada_layer_norm_zero = (num_embeds_ada_norm is not None) and norm_type == 'ada_norm_zero'
self.use_ada_layer_norm = (num_embeds_ada_norm is not None) and norm_type == 'ada_norm'
if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
raise ValueError(
F"""`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to"""
F""" define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}.""" )
# Define 3 blocks. Each block has its own normalization layer.
# 1. Self-Attn
if self.use_ada_layer_norm:
self.norm1 = AdaLayerNorm(snake_case__ , snake_case__ )
elif self.use_ada_layer_norm_zero:
self.norm1 = AdaLayerNormZero(snake_case__ , snake_case__ )
else:
self.norm1 = nn.LayerNorm(snake_case__ , elementwise_affine=snake_case__ )
self.attn1 = Attention(
query_dim=snake_case__ , heads=snake_case__ , dim_head=snake_case__ , dropout=snake_case__ , bias=snake_case__ , cross_attention_dim=cross_attention_dim if only_cross_attention else None , upcast_attention=snake_case__ , )
# 2. Cross-Attn
if cross_attention_dim is not None or double_self_attention:
# We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
# I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
# the second cross attention block.
self.norm2 = (
AdaLayerNorm(snake_case__ , snake_case__ )
if self.use_ada_layer_norm
else nn.LayerNorm(snake_case__ , elementwise_affine=snake_case__ )
)
self.attn2 = Attention(
query_dim=snake_case__ , cross_attention_dim=cross_attention_dim if not double_self_attention else None , heads=snake_case__ , dim_head=snake_case__ , dropout=snake_case__ , bias=snake_case__ , upcast_attention=snake_case__ , ) # is self-attn if encoder_hidden_states is none
else:
self.norm2 = None
self.attn2 = None
# 3. Feed-forward
self.norm3 = nn.LayerNorm(snake_case__ , elementwise_affine=snake_case__ )
self.ff = FeedForward(snake_case__ , dropout=snake_case__ , activation_fn=snake_case__ , final_dropout=snake_case__ )
# let chunk size default to None
self._chunk_size = None
self._chunk_dim = 0
def UpperCamelCase ( self : Union[str, Any] , snake_case__ : Optional[int] , snake_case__ : int ):
"""simple docstring"""
self._chunk_size = chunk_size
self._chunk_dim = dim
def UpperCamelCase ( self : Tuple , snake_case__ : torch.FloatTensor , snake_case__ : Optional[torch.FloatTensor] = None , snake_case__ : Optional[torch.FloatTensor] = None , snake_case__ : Optional[torch.FloatTensor] = None , snake_case__ : Optional[torch.LongTensor] = None , snake_case__ : Dict[str, Any] = None , snake_case__ : Optional[torch.LongTensor] = None , ):
"""simple docstring"""
if self.use_ada_layer_norm:
norm_hidden_states = self.norm1(snake_case__ , snake_case__ )
elif self.use_ada_layer_norm_zero:
norm_hidden_states , gate_msa , shift_mlp , scale_mlp , gate_mlp = self.norm1(
snake_case__ , snake_case__ , snake_case__ , hidden_dtype=hidden_states.dtype )
else:
norm_hidden_states = self.norm1(snake_case__ )
cross_attention_kwargs = cross_attention_kwargs if cross_attention_kwargs is not None else {}
attn_output = self.attn1(
norm_hidden_states , encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None , attention_mask=snake_case__ , **cross_attention_kwargs , )
if self.use_ada_layer_norm_zero:
attn_output = gate_msa.unsqueeze(1 ) * attn_output
hidden_states = attn_output + hidden_states
# 2. Cross-Attention
if self.attn2 is not None:
norm_hidden_states = (
self.norm2(snake_case__ , snake_case__ ) if self.use_ada_layer_norm else self.norm2(snake_case__ )
)
attn_output = self.attn2(
norm_hidden_states , encoder_hidden_states=snake_case__ , attention_mask=snake_case__ , **cross_attention_kwargs , )
hidden_states = attn_output + hidden_states
# 3. Feed-forward
norm_hidden_states = self.norm3(snake_case__ )
if self.use_ada_layer_norm_zero:
norm_hidden_states = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]
if self._chunk_size is not None:
# "feed_forward_chunk_size" can be used to save memory
if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
raise ValueError(
F"""`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`.""" )
num_chunks = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
ff_output = torch.cat(
[self.ff(hid_slice ) for hid_slice in norm_hidden_states.chunk(num_chunks , dim=self._chunk_dim )] , dim=self._chunk_dim , )
else:
ff_output = self.ff(norm_hidden_states )
if self.use_ada_layer_norm_zero:
ff_output = gate_mlp.unsqueeze(1 ) * ff_output
hidden_states = ff_output + hidden_states
return hidden_states
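# --- Illustrative, standalone sketch of the chunked feed-forward trick used in
# the block above: split the sequence dimension, run the MLP per chunk, and
# concatenate the results. The module, shapes, and chunk size are assumptions.
import torch
import torch.nn as nn

ff = nn.Sequential(nn.Linear(64, 256), nn.GELU(), nn.Linear(256, 64))
hidden_states = torch.randn(2, 128, 64)  # (batch, seq, dim)
chunk_size, chunk_dim = 32, 1
num_chunks = hidden_states.shape[chunk_dim] // chunk_size
chunked = torch.cat([ff(hid_slice) for hid_slice in hidden_states.chunk(num_chunks, dim=chunk_dim)], dim=chunk_dim)
# same output as one big call, but with a lower peak activation footprint
assert torch.allclose(chunked, ff(hidden_states), atol=1e-6)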
class UpperCamelCase ( nn.Module ):
def __init__( self : Union[str, Any] , snake_case__ : int , snake_case__ : Optional[int] = None , snake_case__ : int = 4 , snake_case__ : float = 0.0 , snake_case__ : str = "geglu" , snake_case__ : bool = False , ):
"""simple docstring"""
super().__init__()
inner_dim = int(dim * mult )
dim_out = dim_out if dim_out is not None else dim
if activation_fn == "gelu":
act_fn = GELU(dim , inner_dim )
if activation_fn == "gelu-approximate":
act_fn = GELU(dim , inner_dim , approximate='tanh' )
elif activation_fn == "geglu":
act_fn = GEGLU(dim , inner_dim )
elif activation_fn == "geglu-approximate":
act_fn = ApproximateGELU(dim , inner_dim )
self.net = nn.ModuleList([] )
# project in
self.net.append(act_fn )
# project dropout
self.net.append(nn.Dropout(dropout ) )
# project out
self.net.append(nn.Linear(inner_dim , dim_out ) )
# FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout
if final_dropout:
self.net.append(nn.Dropout(snake_case__ ) )
def UpperCamelCase ( self : List[str] , snake_case__ : int ):
"""simple docstring"""
for module in self.net:
hidden_states = module(hidden_states )
return hidden_states
class UpperCamelCase ( nn.Module ):
def __init__( self : Dict , snake_case__ : int , snake_case__ : int , snake_case__ : str = "none" ):
"""simple docstring"""
super().__init__()
self.proj = nn.Linear(snake_case__ , snake_case__ )
self.approximate = approximate
def UpperCamelCase ( self : str , snake_case__ : Any ):
"""simple docstring"""
if gate.device.type != "mps":
return F.gelu(snake_case__ , approximate=self.approximate )
# mps: gelu is not implemented for float16
return F.gelu(gate.to(dtype=torch.floataa ) , approximate=self.approximate ).to(dtype=gate.dtype )
def UpperCamelCase ( self : List[str] , snake_case__ : Tuple ):
"""simple docstring"""
hidden_states = self.proj(snake_case__ )
hidden_states = self.gelu(hidden_states )
return hidden_states
class UpperCamelCase ( nn.Module ):
def __init__( self : Optional[int] , snake_case__ : int , snake_case__ : int ):
"""simple docstring"""
super().__init__()
self.proj = nn.Linear(snake_case__ , dim_out * 2 )
def UpperCamelCase ( self : int , snake_case__ : List[str] ):
"""simple docstring"""
if gate.device.type != "mps":
return F.gelu(gate )
# mps: gelu is not implemented for float16
return F.gelu(gate.to(dtype=torch.float32 ) ).to(dtype=gate.dtype )
def UpperCamelCase ( self : Union[str, Any] , snake_case__ : Optional[int] ):
"""simple docstring"""
hidden_states , gate = self.proj(snake_case__ ).chunk(2 , dim=-1 )
return hidden_states * self.gelu(gate )
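# --- Illustrative, standalone sketch of the GEGLU pattern above, assuming the
# same chunk-and-gate layout: one projected half carries values, the other gates them.
import torch
import torch.nn.functional as F

proj = torch.nn.Linear(16, 32 * 2)
x = torch.randn(4, 16)
hidden, gate = proj(x).chunk(2, dim=-1)
out = hidden * F.gelu(gate)  # gated GELU activation
print(out.shape)  # torch.Size([4, 32])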
class UpperCamelCase ( nn.Module ):
def __init__( self : List[str] , snake_case__ : int , snake_case__ : int ):
"""simple docstring"""
super().__init__()
self.proj = nn.Linear(snake_case__ , snake_case__ )
def UpperCamelCase ( self : Optional[Any] , snake_case__ : Tuple ):
"""simple docstring"""
x = self.proj(snake_case__ )
return x * torch.sigmoid(1.702 * x )
class UpperCamelCase ( nn.Module ):
def __init__( self : Any , snake_case__ : Optional[Any] , snake_case__ : Any ):
"""simple docstring"""
super().__init__()
self.emb = nn.Embedding(snake_case__ , snake_case__ )
self.silu = nn.SiLU()
self.linear = nn.Linear(snake_case__ , embedding_dim * 2 )
self.norm = nn.LayerNorm(snake_case__ , elementwise_affine=snake_case__ )
def UpperCamelCase ( self : List[Any] , snake_case__ : Dict , snake_case__ : int ):
"""simple docstring"""
emb = self.linear(self.silu(self.emb(snake_case__ ) ) )
scale , shift = torch.chunk(emb , 2 )
x = self.norm(snake_case__ ) * (1 + scale) + shift
return x
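# --- Illustrative, standalone sketch of the conditioning pattern in AdaLayerNorm
# above: an embedding is projected to (scale, shift) and modulates a normalized
# activation. The dimensions below are assumptions chosen for the demo.
import torch
import torch.nn as nn

embedding_dim = 8
norm = nn.LayerNorm(embedding_dim, elementwise_affine=False)
to_scale_shift = nn.Linear(embedding_dim, embedding_dim * 2)
x, emb = torch.randn(2, 4, embedding_dim), torch.randn(2, embedding_dim)
scale, shift = torch.chunk(to_scale_shift(emb), 2, dim=-1)
out = norm(x) * (1 + scale[:, None]) + shift[:, None]  # broadcast over the sequence axis
print(out.shape)  # torch.Size([2, 4, 8])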
class UpperCamelCase ( nn.Module ):
def __init__( self : Optional[int] , snake_case__ : int , snake_case__ : Optional[int] ):
"""simple docstring"""
super().__init__()
self.emb = CombinedTimestepLabelEmbeddings(snake_case__ , snake_case__ )
self.silu = nn.SiLU()
self.linear = nn.Linear(snake_case__ , 6 * embedding_dim , bias=snake_case__ )
self.norm = nn.LayerNorm(snake_case__ , elementwise_affine=snake_case__ , eps=1E-6 )
def UpperCamelCase ( self : Optional[Any] , snake_case__ : Tuple , snake_case__ : int , snake_case__ : Dict , snake_case__ : Any=None ):
"""simple docstring"""
emb = self.linear(self.silu(self.emb(snake_case__ , snake_case__ , hidden_dtype=snake_case__ ) ) )
shift_msa , scale_msa , gate_msa , shift_mlp , scale_mlp , gate_mlp = emb.chunk(6 , dim=1 )
x = self.norm(snake_case__ ) * (1 + scale_msa[:, None]) + shift_msa[:, None]
return x, gate_msa, shift_mlp, scale_mlp, gate_mlp
class UpperCamelCase ( nn.Module ):
def __init__( self : Dict , snake_case__ : int , snake_case__ : int , snake_case__ : int , snake_case__ : Optional[str] = None , snake_case__ : float = 1E-5 ):
"""simple docstring"""
super().__init__()
self.num_groups = num_groups
self.eps = eps
if act_fn is None:
self.act = None
else:
self.act = get_activation(act_fn )
self.linear = nn.Linear(snake_case__ , out_dim * 2 )
def UpperCamelCase ( self : Dict , snake_case__ : int , snake_case__ : List[str] ):
"""simple docstring"""
if self.act:
emb = self.act(emb )
emb = self.linear(emb )
emb = emb[:, :, None, None]
scale , shift = emb.chunk(2 , dim=1 )
x = F.group_norm(snake_case__ , self.num_groups , eps=self.eps )
x = x * (1 + scale) + shift
return x
| 673 |
# Note: if you intend to run this script make sure you look under scripts/fsmt/
# to locate the appropriate script to do the work correctly. There is a set of scripts to:
# - download and prepare data and run the conversion script
# - perform eval to get the best hparam into the config
# - generate model_cards - useful if you have multiple models from the same paper
import argparse
import json
import os
import re
from collections import OrderedDict
from os.path import basename, dirname
import fairseq
import torch
from fairseq import hub_utils
from fairseq.data.dictionary import Dictionary
from transformers import FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
json_indent = 2
# based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping`
# values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults:
#
# * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users)
# * `early_stopping`: `False` consistently scored better
# * `length_penalty` varied, so will assign the best one depending on the model
best_score_hparams = {
# fairseq:
"wmt19-ru-en": {"length_penalty": 1.1},
"wmt19-en-ru": {"length_penalty": 1.1_5},
"wmt19-en-de": {"length_penalty": 1.0},
"wmt19-de-en": {"length_penalty": 1.1},
# allenai:
"wmt16-en-de-dist-12-1": {"length_penalty": 0.6},
"wmt16-en-de-dist-6-1": {"length_penalty": 0.6},
"wmt16-en-de-12-1": {"length_penalty": 0.8},
"wmt19-de-en-6-6-base": {"length_penalty": 0.6},
"wmt19-de-en-6-6-big": {"length_penalty": 0.6},
}
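# --- Illustrative, standalone sketch of how such defaults are typically consumed:
# they end up as `generate()` kwargs on a converted FSMT checkpoint. The model id
# below is one of the published facebook/wmt19-* checkpoints and is used only as a demo.
from transformers import FSMTForConditionalGeneration, FSMTTokenizer

demo_tokenizer = FSMTTokenizer.from_pretrained("facebook/wmt19-en-de")
demo_model = FSMTForConditionalGeneration.from_pretrained("facebook/wmt19-en-de")
demo_batch = demo_tokenizer("Good morning", return_tensors="pt")
demo_out = demo_model.generate(**demo_batch, num_beams=5, length_penalty=1.0, early_stopping=False)
print(demo_tokenizer.decode(demo_out[0], skip_special_tokens=True))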
# this remaps the different models to their organization names
org_names = {}
for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
org_names[m] = "facebook"
for m in [
"wmt16-en-de-dist-12-1",
"wmt16-en-de-dist-6-1",
"wmt16-en-de-12-1",
"wmt19-de-en-6-6-base",
"wmt19-de-en-6-6-big",
]:
org_names[m] = "allenai"
def rewrite_dict_keys( d : List[Any] ) -> Any:
'''simple docstring'''
da = dict((re.sub(R'@@$' , '' , k ), v) if k.endswith('@@' ) else (re.sub(R'$' , '</w>' , k ), v) for k, v in d.items() )
keep_keys = '<s> <pad> </s> <unk>'.split()
# restore the special tokens
for k in keep_keys:
del da[f"""{k}</w>"""]
da[k] = d[k] # restore
return da
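# --- Tiny demonstration of the key rewrite above (assuming the helper keeps the
# name rewrite_dict_keys): BPE continuation pieces have their '@@' stripped,
# word-final pieces gain '</w>', and special tokens are restored unchanged.
print(rewrite_dict_keys({'<s>': 0, 'hel@@': 1, 'lo': 2}))
# expected: {'hel': 1, 'lo</w>': 2, '<s>': 0}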
def convert_fsmt_checkpoint_to_pytorch( fsmt_checkpoint_path : Dict , pytorch_dump_folder_path : List[str] ) -> Dict:
'''simple docstring'''
assert os.path.exists(fsmt_checkpoint_path )
os.makedirs(pytorch_dump_folder_path , exist_ok=True )
print(f"""Writing results to {pytorch_dump_folder_path}""" )
# handle various types of models
checkpoint_file = basename(fsmt_checkpoint_path )
fsmt_folder_path = dirname(fsmt_checkpoint_path )
cls = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel
models = cls.hub_models()
kwargs = {'bpe': 'fastbpe', 'tokenizer': 'moses'}
data_name_or_path = '.'
# note: since the model dump is old, fairseq has upgraded its model some
# time later, and it does a whole lot of rewrites and splits on the saved
# weights, therefore we can't use torch.load() directly on the model file.
# see: upgrade_state_dict(state_dict) in fairseq_model.py
print(f"""using checkpoint {checkpoint_file}""" )
chkpt = hub_utils.from_pretrained(
fsmt_folder_path , checkpoint_file , data_name_or_path , archive_map=models , **kwargs )
args = vars(chkpt['args']['model'] )
src_lang = args['source_lang']
tgt_lang = args['target_lang']
data_root = dirname(pytorch_dump_folder_path )
model_dir = basename(pytorch_dump_folder_path )
# dicts
src_dict_file = os.path.join(fsmt_folder_path , f"""dict.{src_lang}.txt""" )
tgt_dict_file = os.path.join(fsmt_folder_path , f"""dict.{tgt_lang}.txt""" )
src_dict = Dictionary.load(src_dict_file )
src_vocab = rewrite_dict_keys(src_dict.indices )
src_vocab_size = len(src_vocab )
src_vocab_file = os.path.join(pytorch_dump_folder_path , 'vocab-src.json' )
print(f"""Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records""" )
with open(src_vocab_file , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(src_vocab , ensure_ascii=False , indent=json_indent ) )
# detect whether this is a do_lower_case situation, which can be derived by checking whether we
# have at least one uppercase letter in the source vocab
do_lower_case = True
for k in src_vocab.keys():
if not k.islower():
do_lower_case = False
break
tgt_dict = Dictionary.load(tgt_dict_file )
tgt_vocab = rewrite_dict_keys(tgt_dict.indices )
tgt_vocab_size = len(tgt_vocab )
tgt_vocab_file = os.path.join(pytorch_dump_folder_path , 'vocab-tgt.json' )
print(f"""Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records""" )
with open(tgt_vocab_file , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(tgt_vocab , ensure_ascii=False , indent=json_indent ) )
# merges_file (bpecodes)
merges_file = os.path.join(pytorch_dump_folder_path , VOCAB_FILES_NAMES['merges_file'] )
for fn in ["bpecodes", "code"]: # older fairseq called the merges file "code"
fsmt_merges_file = os.path.join(fsmt_folder_path , fn )
if os.path.exists(fsmt_merges_file ):
break
with open(fsmt_merges_file , encoding='utf-8' ) as fin:
merges = fin.read()
merges = re.sub(R' \d+$' , '' , merges , 0 , re.M ) # remove frequency number
print(f"""Generating {merges_file}""" )
with open(merges_file , 'w' , encoding='utf-8' ) as fout:
fout.write(merges )
# model config
fsmt_model_config_file = os.path.join(pytorch_dump_folder_path , 'config.json' )
# validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe -
# may have to modify the tokenizer if a different type is used by a future model
assert args["bpe"] == "fastbpe", f"""need to extend tokenizer to support bpe={args['bpe']}"""
assert args["tokenizer"] == "moses", f"""need to extend tokenizer to support bpe={args['tokenizer']}"""
SCREAMING_SNAKE_CASE = {
'architectures': ['FSMTForConditionalGeneration'],
'model_type': 'fsmt',
'activation_dropout': args['activation_dropout'],
'activation_function': 'relu',
'attention_dropout': args['attention_dropout'],
'd_model': args['decoder_embed_dim'],
'dropout': args['dropout'],
'init_std': 0.02,
'max_position_embeddings': args['max_source_positions'],
'num_hidden_layers': args['encoder_layers'],
'src_vocab_size': src_vocab_size,
'tgt_vocab_size': tgt_vocab_size,
'langs': [src_lang, tgt_lang],
'encoder_attention_heads': args['encoder_attention_heads'],
'encoder_ffn_dim': args['encoder_ffn_embed_dim'],
'encoder_layerdrop': args['encoder_layerdrop'],
'encoder_layers': args['encoder_layers'],
'decoder_attention_heads': args['decoder_attention_heads'],
'decoder_ffn_dim': args['decoder_ffn_embed_dim'],
'decoder_layerdrop': args['decoder_layerdrop'],
'decoder_layers': args['decoder_layers'],
'bos_token_id': 0,
'pad_token_id': 1,
'eos_token_id': 2,
'is_encoder_decoder': True,
'scale_embedding': not args['no_scale_embedding'],
'tie_word_embeddings': args['share_all_embeddings'],
}
# good hparam defaults to start with
model_conf['num_beams'] = 5
model_conf['early_stopping'] = False
if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]:
model_conf['length_penalty'] = best_score_hparams[model_dir]['length_penalty']
else:
model_conf['length_penalty'] = 1.0
print(f"""Generating {fsmt_model_config_file}""" )
with open(fsmt_model_config_file , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(model_conf , ensure_ascii=False , indent=json_indent ) )
# tokenizer config
fsmt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path , TOKENIZER_CONFIG_FILE )
tokenizer_conf = {
'langs': [src_lang, tgt_lang],
'model_max_length': 10_24,
'do_lower_case': do_lower_case,
}
print(f"""Generating {fsmt_tokenizer_config_file}""" )
with open(fsmt_tokenizer_config_file , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(tokenizer_conf , ensure_ascii=False , indent=json_indent ) )
# model
model = chkpt['models'][0]
model_state_dict = model.state_dict()
# rename keys to start with 'model.'
model_state_dict = OrderedDict(('model.' + k, v) for k, v in model_state_dict.items() )
# remove unneeded keys
ignore_keys = [
'model.model',
'model.encoder.version',
'model.decoder.version',
'model.encoder_embed_tokens.weight',
'model.decoder_embed_tokens.weight',
'model.encoder.embed_positions._float_tensor',
'model.decoder.embed_positions._float_tensor',
]
for k in ignore_keys:
model_state_dict.pop(k , None )
config = FSMTConfig.from_pretrained(pytorch_dump_folder_path )
model_new = FSMTForConditionalGeneration(config )
# check that it loads ok
model_new.load_state_dict(model_state_dict , strict=False )
# save
pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path , WEIGHTS_NAME )
print(f"""Generating {pytorch_weights_dump_path}""" )
torch.save(model_state_dict , pytorch_weights_dump_path )
print('Conversion is done!' )
print('\nLast step is to upload the files to s3' )
print(f"""cd {data_root}""" )
print(f"""transformers-cli upload {model_dir}""" )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--fsmt_checkpoint_path",
default=None,
type=str,
required=True,
help=(
"Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"
" bpecodes, etc."
),
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
args = parser.parse_args()
convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
| 673 | 1 |
import os
from pickle import UnpicklingError
from typing import Dict, Tuple
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict
import transformers
from .utils import logging
logger = logging.get_logger(__name__)
def load_pytorch_checkpoint_in_flax_state_dict( flax_model : List[Any] , pytorch_checkpoint_path : Optional[Any] , is_sharded : Optional[int] , allow_missing_keys : str=False ) -> Tuple:
'''simple docstring'''
try:
import torch # noqa: F401
except ImportError:
logger.error(
'Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see'
' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'
' instructions.' )
raise
if not is_sharded:
pt_path = os.path.abspath(pytorch_checkpoint_path )
logger.info(f"""Loading PyTorch weights from {pt_path}""" )
pt_state_dict = torch.load(pt_path , map_location='cpu' )
logger.info(f"""PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values() ):,} parameters.""" )
flax_state_dict = convert_pytorch_state_dict_to_flax(pt_state_dict , flax_model )
else:
# model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files
flax_state_dict = convert_pytorch_sharded_state_dict_to_flax(pytorch_checkpoint_path , flax_model )
return flax_state_dict
def rename_key_and_reshape_tensor( pt_tuple_key : Tuple[str] , pt_tensor : np.ndarray , random_flax_state_dict : Dict[str, jnp.ndarray] , model_prefix : str , ) -> (Tuple[str], np.ndarray):
'''simple docstring'''
def is_key_or_prefix_key_in_dict(key : Tuple[str] ) -> bool:
return len(set(random_flax_state_dict ) & {key, (model_prefix,) + key} ) > 0
# layer norm
renamed_pt_tuple_key = pt_tuple_key[:-1] + ('scale',)
if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(renamed_pt_tuple_key ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer mean
renamed_pt_tuple_key = pt_tuple_key[:-1] + ('mean',)
if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(pt_tuple_key ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer var
renamed_pt_tuple_key = pt_tuple_key[:-1] + ('var',)
if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(pt_tuple_key ):
return renamed_pt_tuple_key, pt_tensor
# embedding
renamed_pt_tuple_key = pt_tuple_key[:-1] + ('embedding',)
if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(renamed_pt_tuple_key ):
return renamed_pt_tuple_key, pt_tensor
# conv layer
renamed_pt_tuple_key = pt_tuple_key[:-1] + ('kernel',)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(pt_tuple_key ):
pt_tensor = pt_tensor.transpose(2 , 3 , 1 , 0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
renamed_pt_tuple_key = pt_tuple_key[:-1] + ('kernel',)
if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(pt_tuple_key ):
pt_tensor = pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
renamed_pt_tuple_key = pt_tuple_key[:-1] + ('weight',)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
renamed_pt_tuple_key = pt_tuple_key[:-1] + ('bias',)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
name = None
if pt_tuple_key[-3::2] == ("parametrizations", "original0"):
name = pt_tuple_key[-2] + '_g'
elif pt_tuple_key[-3::2] == ("parametrizations", "original1"):
name = pt_tuple_key[-2] + '_v'
if name is not None:
renamed_pt_tuple_key = pt_tuple_key[:-3] + (name,)
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
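# --- Illustrative, standalone sketch of why the transposes above exist: PyTorch
# conv weights are (out, in, kh, kw) while Flax expects (kh, kw, in, out); linear
# kernels are simply transposed. The shapes below are assumptions for the demo.
import numpy as np

pt_conv = np.zeros((8, 3, 3, 3))           # (out_ch, in_ch, kh, kw)
flax_conv = pt_conv.transpose(2, 3, 1, 0)  # (kh, kw, in_ch, out_ch)
pt_linear = np.zeros((16, 32))             # (out_features, in_features)
flax_linear = pt_linear.T                  # (in_features, out_features)
print(flax_conv.shape, flax_linear.shape)  # (3, 3, 3, 8) (32, 16)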
def convert_pytorch_state_dict_to_flax( pt_state_dict : Optional[int] , flax_model : List[str] ) -> int:
'''simple docstring'''
pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}
model_prefix = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers
if "params" in flax_model.params:
flax_model_params = flax_model.params['params']
else:
flax_model_params = flax_model.params
random_flax_state_dict = flatten_dict(flax_model_params )
# add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
flax_batch_stats = flatten_dict(flax_model.params['batch_stats'] )
random_flax_state_dict.update(flax_batch_stats )
flax_state_dict = {}
load_model_with_head_into_base_model = (model_prefix not in flax_model_params) and (
model_prefix in {k.split('.' )[0] for k in pt_state_dict.keys()}
)
load_base_model_into_model_with_head = (model_prefix in flax_model_params) and (
model_prefix not in {k.split('.' )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
pt_tuple_key = tuple(pt_key.split('.' ) )
# remove base model prefix if necessary
has_base_model_prefix = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
pt_tuple_key = pt_tuple_key[1:]
# Correctly rename weight parameters
flax_key , flax_tensor = rename_key_and_reshape_tensor(
pt_tuple_key , pt_tensor , random_flax_state_dict , model_prefix )
# add model prefix if necessary
require_base_model_prefix = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
flax_key = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
f"""PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape """
f"""{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.""" )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1] or "var" in flax_key[-1]:
flax_state_dict[('batch_stats',) + flax_key] = jnp.asarray(flax_tensor )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(flax_key , None )
continue
# also add unexpected weight so that warning is thrown
flax_state_dict[('params',) + flax_key] = jnp.asarray(flax_tensor )
else:
# also add unexpected weight so that warning is thrown
flax_state_dict[flax_key] = jnp.asarray(flax_tensor )
return unflatten_dict(flax_state_dict )
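# --- Illustrative, standalone round-trip of the flatten_dict/unflatten_dict pair
# the converter above relies on: nested param trees become tuple-keyed flat dicts
# and back. The toy tree below is an assumption for the demo.
from flax.traverse_util import flatten_dict, unflatten_dict

demo_params = {'encoder': {'layer_0': {'kernel': 1}}}
demo_flat = flatten_dict(demo_params)  # {('encoder', 'layer_0', 'kernel'): 1}
assert unflatten_dict(demo_flat) == demo_params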
def convert_pytorch_sharded_state_dict_to_flax( shard_filenames : Optional[Any] , flax_model : Union[str, Any] ) -> str:
'''simple docstring'''
import torch
# Load the index
flax_state_dict = {}
for shard_file in shard_filenames:
# load using msgpack utils
pt_state_dict = torch.load(shard_file )
pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}
model_prefix = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
flax_model_params = flax_model.params['params']
random_flax_state_dict = flatten_dict(flax_model_params )
random_flax_state_dict.update(flatten_dict(flax_model.params['batch_stats'] ) )
else:
flax_model_params = flax_model.params
random_flax_state_dict = flatten_dict(flax_model_params )
load_model_with_head_into_base_model = (model_prefix not in flax_model_params) and (
model_prefix in {k.split('.' )[0] for k in pt_state_dict.keys()}
)
load_base_model_into_model_with_head = (model_prefix in flax_model_params) and (
model_prefix not in {k.split('.' )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
pt_tuple_key = tuple(pt_key.split('.' ) )
# remove base model prefix if necessary
has_base_model_prefix = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
pt_tuple_key = pt_tuple_key[1:]
# Correctly rename weight parameters
flax_key , flax_tensor = rename_key_and_reshape_tensor(
pt_tuple_key , pt_tensor , random_flax_state_dict , model_prefix )
# add model prefix if necessary
require_base_model_prefix = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
flax_key = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
f"""PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape """
f"""{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.""" )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1]:
flax_state_dict[('batch_stats',) + flax_key] = jnp.asarray(flax_tensor )
continue
if "var" in flax_key[-1]:
flax_state_dict[('batch_stats',) + flax_key] = jnp.asarray(flax_tensor )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(flax_key , None )
continue
# also add unexpected weight so that warning is thrown
flax_state_dict[('params',) + flax_key] = jnp.asarray(flax_tensor )
else:
# also add unexpected weight so that warning is thrown
flax_state_dict[flax_key] = jnp.asarray(flax_tensor )
return unflatten_dict(flax_state_dict )
def load_flax_checkpoint_in_pytorch_model( model : Optional[Any] , flax_checkpoint_path : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
flax_checkpoint_path = os.path.abspath(flax_checkpoint_path )
logger.info(f"""Loading Flax weights from {flax_checkpoint_path}""" )
# import correct flax class
flax_cls = getattr(transformers , 'Flax' + model.__class__.__name__ )
# load flax weight dict
with open(flax_checkpoint_path , 'rb' ) as state_f:
try:
flax_state_dict = from_bytes(flax_cls , state_f.read() )
except UnpicklingError:
raise EnvironmentError(f"""Unable to convert {flax_checkpoint_path} to Flax deserializable object. """ )
return load_flax_weights_in_pytorch_model(model , flax_state_dict )
def load_flax_weights_in_pytorch_model( pt_model : List[Any] , flax_state : Optional[int] ) -> Any:
'''simple docstring'''
try:
import torch # noqa: F401
except ImportError:
logger.error(
'Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see'
' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'
' instructions.' )
raise
# check if we have bf16 weights
is_type_bf16 = flatten_dict(jax.tree_util.tree_map(lambda x : x.dtype == jnp.bfloat16 , flax_state ) ).values()
if any(is_type_bf16 ):
# convert all weights to fp32 if the are bf16 since torch.from_numpy can-not handle bf16
# and bf16 is not fully supported in PT yet.
logger.warning(
'Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` '
'before loading those in PyTorch model.' )
flax_state = jax.tree_util.tree_map(
lambda params : params.astype(np.float32 ) if params.dtype == jnp.bfloat16 else params , flax_state )
flax_state_dict = flatten_dict(flax_state )
pt_model_dict = pt_model.state_dict()
load_model_with_head_into_base_model = (pt_model.base_model_prefix in flax_state) and (
pt_model.base_model_prefix not in {k.split('.' )[0] for k in pt_model_dict.keys()}
)
load_base_model_into_model_with_head = (pt_model.base_model_prefix not in flax_state) and (
pt_model.base_model_prefix in {k.split('.' )[0] for k in pt_model_dict.keys()}
)
# keep track of unexpected & missing keys
unexpected_keys = []
missing_keys = set(pt_model_dict.keys() )
for flax_key_tuple, flax_tensor in flax_state_dict.items():
has_base_model_prefix = flax_key_tuple[0] == pt_model.base_model_prefix
require_base_model_prefix = '.'.join((pt_model.base_model_prefix,) + flax_key_tuple ) in pt_model_dict
# adapt flax_key to prepare for loading from/to base model only
if load_model_with_head_into_base_model and has_base_model_prefix:
flax_key_tuple = flax_key_tuple[1:]
elif load_base_model_into_model_with_head and require_base_model_prefix:
flax_key_tuple = (pt_model.base_model_prefix,) + flax_key_tuple
# rename flax weights to PyTorch format
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(_UpperCamelCase ) not in pt_model_dict:
# conv layer
SCREAMING_SNAKE_CASE = flax_key_tuple[:-1] + ('weight',)
SCREAMING_SNAKE_CASE = jnp.transpose(_UpperCamelCase , (3, 2, 0, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(_UpperCamelCase ) not in pt_model_dict:
# linear layer
SCREAMING_SNAKE_CASE = flax_key_tuple[:-1] + ('weight',)
SCREAMING_SNAKE_CASE = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
SCREAMING_SNAKE_CASE = flax_key_tuple[:-1] + ('weight',)
# adding batch stats from flax batch norm to pt
elif "mean" in flax_key_tuple[-1]:
SCREAMING_SNAKE_CASE = flax_key_tuple[:-1] + ('running_mean',)
elif "var" in flax_key_tuple[-1]:
SCREAMING_SNAKE_CASE = flax_key_tuple[:-1] + ('running_var',)
if "batch_stats" in flax_state:
SCREAMING_SNAKE_CASE = '.'.join(flax_key_tuple[1:] ) # Remove the params/batch_stats header
else:
SCREAMING_SNAKE_CASE = '.'.join(_UpperCamelCase )
# We also need to look at `pt_model_dict` and see if there are keys requiring further transformation.
special_pt_names = {}
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
for key in pt_model_dict:
key_components = key.split('.' )
name = None
if key_components[-3::2] == ["parametrizations", "original0"]:
name = key_components[-2] + '_g'
elif key_components[-3::2] == ["parametrizations", "original1"]:
name = key_components[-2] + '_v'
if name is not None:
key_components = key_components[:-3] + [name]
key_to_check = '.'.join(key_components )
special_pt_names[key_to_check] = key
if flax_key in special_pt_names:
flax_key = special_pt_names[flax_key]
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
f"""Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected """
f"""to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.""" )
else:
# add weight to pytorch dict
flax_tensor = np.asarray(flax_tensor ) if not isinstance(flax_tensor , np.ndarray ) else flax_tensor
pt_model_dict[flax_key] = torch.from_numpy(flax_tensor )
# remove from missing keys
missing_keys.remove(flax_key )
else:
# weight is not expected by PyTorch model
unexpected_keys.append(flax_key )
pt_model.load_state_dict(pt_model_dict )
# re-transform missing_keys to list
missing_keys = list(missing_keys )
if len(unexpected_keys ) > 0:
logger.warning(
'Some weights of the Flax model were not used when initializing the PyTorch model'
f""" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"""
f""" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"""
' (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This'
f""" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"""
' to be exactly identical (e.g. initializing a BertForSequenceClassification model from a'
' FlaxBertForSequenceClassification model).' )
else:
logger.warning(f"""All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n""" )
if len(missing_keys ) > 0:
logger.warning(
f"""Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"""
f""" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"""
' use it for predictions and inference.' )
else:
logger.warning(
f"""All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n"""
'If your task is similar to the task the model of the checkpoint was trained on, '
f"""you can already use {pt_model.__class__.__name__} for predictions without further training.""" )
return pt_model
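# --- Illustrative, standalone sketch of the bf16 -> fp32 upcast performed above:
# torch.from_numpy cannot ingest bfloat16 arrays, so parameters are cast first.
# The toy parameter tree is an assumption for the demo.
import jax
import jax.numpy as jnp
import numpy as np

demo_tree = {'w': jnp.ones((2, 2), dtype=jnp.bfloat16)}
demo_tree = jax.tree_util.tree_map(lambda p: p.astype(np.float32) if p.dtype == jnp.bfloat16 else p, demo_tree)
print(demo_tree['w'].dtype)  # float32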
| 673 |
import random
def random_graph ( vertices_number : int , probability : float , directed : bool = False ) -> dict:
'''simple docstring'''
graph = {i: [] for i in range(vertices_number )}
# if probability is greater than or equal to 1, generate a complete graph
if probability >= 1:
return complete_graph(vertices_number )
# if probability is less than or equal to 0, return a graph without edges
if probability <= 0:
return graph
# for each pair of nodes, add an edge from u to v
# if the randomly generated number is lower than the given probability
for i in range(vertices_number ):
for j in range(i + 1 , vertices_number ):
if random.random() < probability:
graph[i].append(j )
if not directed:
# if the graph is undirected, also add an edge from j to i
graph[j].append(i )
return graph
def complete_graph ( vertices_number : int ) -> dict:
'''simple docstring'''
return {
i: [j for j in range(vertices_number ) if i != j] for i in range(vertices_number )
}
if __name__ == "__main__":
import doctest
doctest.testmod()
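# Example usage of the generators above (assuming they are named random_graph and
# complete_graph); seeded so the random output is reproducible:
random.seed(1)
print(random_graph(4, 0.5))    # sparse undirected graph on 4 nodes
print(complete_graph(3))       # -> {0: [1, 2], 1: [0, 2], 2: [0, 1]}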
| 673 | 1 |
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class UpperCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ):
__UpperCamelCase =(
(
TFMobileBertModel,
TFMobileBertForMaskedLM,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertForMultipleChoice,
)
if is_tf_available()
else ()
)
__UpperCamelCase =(
{
"feature-extraction": TFMobileBertModel,
"fill-mask": TFMobileBertForMaskedLM,
"question-answering": TFMobileBertForQuestionAnswering,
"text-classification": TFMobileBertForSequenceClassification,
"token-classification": TFMobileBertForTokenClassification,
"zero-shot": TFMobileBertForSequenceClassification,
}
if is_tf_available()
else {}
)
__UpperCamelCase =False
__UpperCamelCase =False
def UpperCamelCase ( self : Tuple , snake_case__ : Optional[Any] , snake_case__ : List[str] , snake_case__ : str=False ):
"""simple docstring"""
inputs_dict = super()._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ )
if return_labels:
if model_class in get_values(snake_case__ ):
inputs_dict['next_sentence_label'] = tf.zeros(self.model_tester.batch_size , dtype=tf.int32 )
return inputs_dict
class UpperCamelCase ( SCREAMING_SNAKE_CASE ):
def __init__( self : Optional[Any] , snake_case__ : List[Any] , snake_case__ : Dict=1_3 , snake_case__ : List[Any]=7 , snake_case__ : Optional[int]=True , snake_case__ : Any=True , snake_case__ : Optional[Any]=True , snake_case__ : Optional[int]=True , snake_case__ : Optional[int]=9_9 , snake_case__ : Any=3_2 , snake_case__ : Union[str, Any]=3_2 , snake_case__ : List[str]=2 , snake_case__ : Optional[int]=4 , snake_case__ : str=3_7 , snake_case__ : Optional[Any]="gelu" , snake_case__ : List[str]=0.1 , snake_case__ : List[str]=0.1 , snake_case__ : Any=5_1_2 , snake_case__ : List[Any]=1_6 , snake_case__ : List[str]=2 , snake_case__ : Tuple=0.02 , snake_case__ : Tuple=3 , snake_case__ : Any=4 , snake_case__ : Tuple=None , ):
"""simple docstring"""
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_input_mask = use_input_mask
self.use_token_type_ids = use_token_type_ids
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.num_labels = num_labels
self.num_choices = num_choices
self.scope = scope
self.embedding_size = embedding_size
def UpperCamelCase ( self : Any ):
"""simple docstring"""
input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
input_mask = None
if self.use_input_mask:
input_mask = random_attention_mask([self.batch_size, self.seq_length] )
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
sequence_labels = None
token_labels = None
choice_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
choice_labels = ids_tensor([self.batch_size] , self.num_choices )
config = MobileBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , embedding_size=self.embedding_size , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase ( self : str , snake_case__ : str , snake_case__ : Union[str, Any] , snake_case__ : List[str] , snake_case__ : Union[str, Any] , snake_case__ : Optional[int] , snake_case__ : str , snake_case__ : Optional[Any] ):
"""simple docstring"""
model = TFMobileBertModel(config=snake_case__ )
inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
result = model(inputs )
inputs = [input_ids, input_mask]
result = model(inputs )
result = model(input_ids )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def UpperCamelCase ( self : Tuple , snake_case__ : Optional[Any] , snake_case__ : Optional[int] , snake_case__ : int , snake_case__ : Any , snake_case__ : List[str] , snake_case__ : Any , snake_case__ : str ):
"""simple docstring"""
model = TFMobileBertForMaskedLM(config=snake_case__ )
inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
result = model(inputs )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase ( self : Any , snake_case__ : int , snake_case__ : Tuple , snake_case__ : Tuple , snake_case__ : Tuple , snake_case__ : int , snake_case__ : Tuple , snake_case__ : Optional[Any] ):
"""simple docstring"""
model = TFMobileBertForNextSentencePrediction(config=snake_case__ )
inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
result = model(inputs )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def UpperCamelCase ( self : Optional[Any] , snake_case__ : Any , snake_case__ : Dict , snake_case__ : Tuple , snake_case__ : Any , snake_case__ : List[Any] , snake_case__ : int , snake_case__ : List[str] ):
"""simple docstring"""
model = TFMobileBertForPreTraining(config=snake_case__ )
inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
result = model(inputs )
self.parent.assertEqual(
result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def UpperCamelCase ( self : List[str] , snake_case__ : Optional[int] , snake_case__ : Any , snake_case__ : Optional[Any] , snake_case__ : Dict , snake_case__ : Any , snake_case__ : Any , snake_case__ : List[str] ):
"""simple docstring"""
config.num_labels = self.num_labels
model = TFMobileBertForSequenceClassification(config=snake_case__ )
inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
result = model(inputs )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase ( self : Tuple , snake_case__ : Dict , snake_case__ : Optional[int] , snake_case__ : Optional[Any] , snake_case__ : int , snake_case__ : Optional[Any] , snake_case__ : Any , snake_case__ : List[str] ):
"""simple docstring"""
config.num_choices = self.num_choices
model = TFMobileBertForMultipleChoice(config=snake_case__ )
multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids , 1 ) , (1, self.num_choices, 1) )
multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask , 1 ) , (1, self.num_choices, 1) )
multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids , 1 ) , (1, self.num_choices, 1) )
inputs = {
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
result = model(inputs )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCamelCase ( self : List[str] , snake_case__ : List[Any] , snake_case__ : Tuple , snake_case__ : Dict , snake_case__ : Optional[Any] , snake_case__ : str , snake_case__ : List[str] , snake_case__ : List[str] ):
"""simple docstring"""
config.num_labels = self.num_labels
model = TFMobileBertForTokenClassification(config=snake_case__ )
inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
result = model(inputs )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase ( self : List[str] , snake_case__ : str , snake_case__ : List[str] , snake_case__ : Optional[Any] , snake_case__ : Dict , snake_case__ : Union[str, Any] , snake_case__ : List[Any] , snake_case__ : List[Any] ):
"""simple docstring"""
model = TFMobileBertForQuestionAnswering(config=snake_case__ )
inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
result = model(inputs )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
config_and_inputs = self.prepare_config_and_inputs()
(
config ,
input_ids ,
token_type_ids ,
input_mask ,
sequence_labels ,
token_labels ,
choice_labels ,
) = config_and_inputs
inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
def UpperCamelCase ( self : int ):
"""simple docstring"""
self.model_tester = TFMobileBertModelTest.TFMobileBertModelTester(self )
self.config_tester = ConfigTester(self , config_class=snake_case__ , hidden_size=3_7 )
def UpperCamelCase ( self : List[str] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*config_and_inputs )
def UpperCamelCase ( self : List[str] ):
"""simple docstring"""
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs )
def UpperCamelCase ( self : List[str] ):
"""simple docstring"""
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs )
def UpperCamelCase ( self : str ):
"""simple docstring"""
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs )
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*snake_case__ )
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*snake_case__ )
def UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*snake_case__ )
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*snake_case__ )
@slow
def UpperCamelCase ( self : str ):
"""simple docstring"""
for model_name in ["google/mobilebert-uncased"]:
SCREAMING_SNAKE_CASE = TFMobileBertModel.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
@require_tf
class UpperCamelCase ( unittest.TestCase ):
@slow
def UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = TFMobileBertForPreTraining.from_pretrained('google/mobilebert-uncased' )
SCREAMING_SNAKE_CASE = tf.constant([[0, 1, 2, 3, 4, 5]] )
SCREAMING_SNAKE_CASE = model(snake_case__ )[0]
SCREAMING_SNAKE_CASE = [1, 6, 3_0_5_2_2]
self.assertEqual(output.shape , snake_case__ )
SCREAMING_SNAKE_CASE = tf.constant(
[
[
[-4.5_919_547, -9.248_295, -9.645_256],
[-6.7_306_175, -6.440_284, -6.6_052_837],
[-7.2_743_506, -6.7_847_915, -6.024_673],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , snake_case__ , atol=1E-4 )
| 673 |
import unittest
from transformers import AutoTokenizer, NystromformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
NystromformerModel,
)
from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
class UpperCamelCase :
def __init__( self : List[str] , snake_case__ : Dict , snake_case__ : Optional[Any]=1_3 , snake_case__ : Union[str, Any]=7 , snake_case__ : List[str]=True , snake_case__ : Any=True , snake_case__ : List[str]=True , snake_case__ : Optional[Any]=True , snake_case__ : List[str]=9_9 , snake_case__ : str=3_2 , snake_case__ : Dict=5 , snake_case__ : str=4 , snake_case__ : int=3_7 , snake_case__ : Union[str, Any]="gelu" , snake_case__ : Any=0.1 , snake_case__ : Dict=0.1 , snake_case__ : Optional[Any]=5_1_2 , snake_case__ : List[Any]=1_6 , snake_case__ : str=2 , snake_case__ : int=0.02 , snake_case__ : List[str]=3 , snake_case__ : Dict=4 , snake_case__ : str=None , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = parent
SCREAMING_SNAKE_CASE = batch_size
SCREAMING_SNAKE_CASE = seq_length
SCREAMING_SNAKE_CASE = is_training
SCREAMING_SNAKE_CASE = use_input_mask
SCREAMING_SNAKE_CASE = use_token_type_ids
SCREAMING_SNAKE_CASE = use_labels
SCREAMING_SNAKE_CASE = vocab_size
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = intermediate_size
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = hidden_dropout_prob
SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE = max_position_embeddings
SCREAMING_SNAKE_CASE = type_vocab_size
SCREAMING_SNAKE_CASE = type_sequence_label_size
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = num_labels
SCREAMING_SNAKE_CASE = num_choices
SCREAMING_SNAKE_CASE = scope
def UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
if self.use_labels:
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_choices )
SCREAMING_SNAKE_CASE = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
return NystromformerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=snake_case__ , initializer_range=self.initializer_range , )
def UpperCamelCase ( self : List[str] , snake_case__ : Any , snake_case__ : Tuple , snake_case__ : Union[str, Any] , snake_case__ : Dict , snake_case__ : List[str] , snake_case__ : int , snake_case__ : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = NystromformerModel(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ )
SCREAMING_SNAKE_CASE = model(snake_case__ , token_type_ids=snake_case__ )
SCREAMING_SNAKE_CASE = model(snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase ( self : List[Any] , snake_case__ : str , snake_case__ : Dict , snake_case__ : Dict , snake_case__ : Dict , snake_case__ : int , snake_case__ : int , snake_case__ : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = NystromformerForMaskedLM(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase ( self : Union[str, Any] , snake_case__ : Tuple , snake_case__ : Optional[Any] , snake_case__ : int , snake_case__ : Optional[int] , snake_case__ : str , snake_case__ : List[str] , snake_case__ : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = NystromformerForQuestionAnswering(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(
snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCamelCase ( self : List[str] , snake_case__ : int , snake_case__ : int , snake_case__ : int , snake_case__ : str , snake_case__ : Optional[int] , snake_case__ : Any , snake_case__ : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.num_labels
SCREAMING_SNAKE_CASE = NystromformerForSequenceClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase ( self : Tuple , snake_case__ : int , snake_case__ : Tuple , snake_case__ : List[str] , snake_case__ : List[Any] , snake_case__ : Optional[Any] , snake_case__ : List[Any] , snake_case__ : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.num_labels
SCREAMING_SNAKE_CASE = NystromformerForTokenClassification(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase ( self : int , snake_case__ : Optional[Any] , snake_case__ : List[str] , snake_case__ : Union[str, Any] , snake_case__ : Tuple , snake_case__ : Optional[int] , snake_case__ : Any , snake_case__ : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.num_choices
SCREAMING_SNAKE_CASE = NystromformerForMultipleChoice(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE = model(
snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
        (
            SCREAMING_SNAKE_CASE ,
            SCREAMING_SNAKE_CASE ,
            SCREAMING_SNAKE_CASE ,
            SCREAMING_SNAKE_CASE ,
            SCREAMING_SNAKE_CASE ,
            SCREAMING_SNAKE_CASE ,
            SCREAMING_SNAKE_CASE ,
        ) = config_and_inputs
SCREAMING_SNAKE_CASE = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class UpperCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ):
__UpperCamelCase =(
(
NystromformerModel,
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
)
if is_torch_available()
else ()
)
__UpperCamelCase =(
{
"feature-extraction": NystromformerModel,
"fill-mask": NystromformerForMaskedLM,
"question-answering": NystromformerForQuestionAnswering,
"text-classification": NystromformerForSequenceClassification,
"token-classification": NystromformerForTokenClassification,
"zero-shot": NystromformerForSequenceClassification,
}
if is_torch_available()
else {}
)
__UpperCamelCase =False
__UpperCamelCase =False
def UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = NystromformerModelTester(self )
SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=snake_case__ , hidden_size=3_7 )
def UpperCamelCase ( self : List[str] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case__ )
def UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
SCREAMING_SNAKE_CASE = type
self.model_tester.create_and_check_model(*snake_case__ )
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*snake_case__ )
def UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*snake_case__ )
def UpperCamelCase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*snake_case__ )
def UpperCamelCase ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*snake_case__ )
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*snake_case__ )
@slow
def UpperCamelCase ( self : List[str] ):
"""simple docstring"""
for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE = NystromformerModel.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
@require_torch
class UpperCamelCase ( unittest.TestCase ):
@slow
def UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = NystromformerModel.from_pretrained('uw-madison/nystromformer-512' )
SCREAMING_SNAKE_CASE = torch.tensor([[0, 1, 2, 3, 4, 5]] )
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(snake_case__ )[0]
SCREAMING_SNAKE_CASE = torch.Size((1, 6, 7_6_8) )
self.assertEqual(output.shape , snake_case__ )
SCREAMING_SNAKE_CASE = torch.tensor(
[[[-0.4_532, -0.0_936, 0.5_137], [-0.2_676, 0.0_628, 0.6_186], [-0.3_629, -0.1_726, 0.4_716]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , snake_case__ , atol=1E-4 ) )
@slow
def UpperCamelCase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = 'the [MASK] of Belgium is Brussels'
SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained('uw-madison/nystromformer-512' )
SCREAMING_SNAKE_CASE = NystromformerForMaskedLM.from_pretrained('uw-madison/nystromformer-512' )
SCREAMING_SNAKE_CASE = tokenizer(snake_case__ , return_tensors='pt' )
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(encoding.input_ids ).logits
SCREAMING_SNAKE_CASE = token_logits[:, 2, :].argmax(-1 )[0]
self.assertEqual(tokenizer.decode(snake_case__ ) , 'capital' )
| 673 | 1 |
import argparse
import datetime
def __lowerCAmelCase ( _UpperCamelCase : str ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE = {
'0': 'Sunday',
'1': 'Monday',
'2': 'Tuesday',
'3': 'Wednesday',
'4': 'Thursday',
'5': 'Friday',
'6': 'Saturday',
}
SCREAMING_SNAKE_CASE = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}
# Validate
    if len(_UpperCamelCase ) != 10:
        raise ValueError('Must be 10 characters long' )
# Get month
SCREAMING_SNAKE_CASE = int(date_input[0] + date_input[1] )
# Validate
if not 0 < m < 13:
raise ValueError('Month must be between 1 - 12' )
SCREAMING_SNAKE_CASE = date_input[2]
# Validate
if sep_a not in ["-", "/"]:
raise ValueError('Date separator must be \'-\' or \'/\'' )
# Get day
SCREAMING_SNAKE_CASE = int(date_input[3] + date_input[4] )
# Validate
if not 0 < d < 32:
raise ValueError('Date must be between 1 - 31' )
# Get second separator
SCREAMING_SNAKE_CASE = date_input[5]
# Validate
if sep_a not in ["-", "/"]:
raise ValueError('Date separator must be \'-\' or \'/\'' )
# Get year
SCREAMING_SNAKE_CASE = int(date_input[6] + date_input[7] + date_input[8] + date_input[9] )
# Arbitrary year range
if not 45 < y < 85_00:
raise ValueError(
'Year out of range. There has to be some sort of limit...right?' )
# Get datetime obj for validation
SCREAMING_SNAKE_CASE = datetime.date(int(_UpperCamelCase ) , int(_UpperCamelCase ) , int(_UpperCamelCase ) )
# Start math
if m <= 2:
SCREAMING_SNAKE_CASE = y - 1
SCREAMING_SNAKE_CASE = m + 12
    # maths vars: century (c), year-of-century (k), and the Zeller congruence terms
SCREAMING_SNAKE_CASE = int(str(_UpperCamelCase )[:2] )
SCREAMING_SNAKE_CASE = int(str(_UpperCamelCase )[2:] )
SCREAMING_SNAKE_CASE = int(2.6 * m - 5.39 )
SCREAMING_SNAKE_CASE = int(c / 4 )
SCREAMING_SNAKE_CASE = int(k / 4 )
SCREAMING_SNAKE_CASE = int(d + k )
SCREAMING_SNAKE_CASE = int(t + u + v + x )
SCREAMING_SNAKE_CASE = int(z - (2 * c) )
SCREAMING_SNAKE_CASE = round(w % 7 )
# End math
# Validate math
if f != convert_datetime_days[dt_ck.weekday()]:
raise AssertionError('The date was evaluated incorrectly. Contact developer.' )
# Response
SCREAMING_SNAKE_CASE = f"""Your date {date_input}, is a {days[str(_UpperCamelCase )]}!"""
return response
if __name__ == "__main__":
import doctest
doctest.testmod()
a_ : Any = argparse.ArgumentParser(
description=(
"Find out what day of the week nearly any date is or was. Enter "
"date as a string in the mm-dd-yyyy or mm/dd/yyyy format"
)
)
parser.add_argument(
"date_input", type=str, help="Date as a string (mm-dd-yyyy or mm/dd/yyyy)"
)
a_ : Optional[Any] = parser.parse_args()
zeller(args.date_input)
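# Worked example (an illustrative sketch, using readable names for the
# mangled intermediates): for the input "01-31-2010", m=1 folds into the
# previous year, giving y=2009, m=13, d=31. Then c=20, k=9,
# t=int(2.6*13 - 5.39)=28, u=int(20/4)=5, v=int(9/4)=2, x=31+9=40,
# z=28+5+2+40=75, w=75-2*20=35, and f=35%7=0 -> Sunday, so the script
# prints "Your date 01-31-2010, is a Sunday!".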
| 673 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
ImageTextPipelineOutput,
UniDiffuserPipeline,
)
else:
from .modeling_text_decoder import UniDiffuserTextDecoder
from .modeling_uvit import UniDiffuserModel, UTransformeraDModel
from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
| 673 | 1 |
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
from .timesteps import (
    fastaa_timesteps,
    smartaa_timesteps,
    smartaaa_timesteps,
    superaa_timesteps,
    superaaa_timesteps,
)
@dataclass
class UpperCamelCase ( SCREAMING_SNAKE_CASE ):
__UpperCamelCase =42
__UpperCamelCase =42
__UpperCamelCase =42
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_if import IFPipeline
from .pipeline_if_imgaimg import IFImgaImgPipeline
from .pipeline_if_imgaimg_superresolution import IFImgaImgSuperResolutionPipeline
from .pipeline_if_inpainting import IFInpaintingPipeline
from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline
from .pipeline_if_superresolution import IFSuperResolutionPipeline
from .safety_checker import IFSafetyChecker
from .watermark import IFWatermarker
| 673 |
import heapq
import sys
import numpy as np
a_ : Optional[int] = tuple[int, int]
class UpperCamelCase :
def __init__( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = set()
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
if not self.empty():
return self.elements[0][0]
else:
return float('inf' )
def UpperCamelCase ( self : List[str] ):
"""simple docstring"""
return len(self.elements ) == 0
def UpperCamelCase ( self : Union[str, Any] , snake_case__ : Optional[Any] , snake_case__ : List[Any] ):
"""simple docstring"""
if item not in self.set:
heapq.heappush(self.elements , (priority, item) )
self.set.add(snake_case__ )
else:
# update
# print("update", item)
SCREAMING_SNAKE_CASE = []
((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) = heapq.heappop(self.elements )
while x != item:
temp.append((pri, x) )
((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) = heapq.heappop(self.elements )
temp.append((priority, item) )
for pro, xxx in temp:
heapq.heappush(self.elements , (pro, xxx) )
def UpperCamelCase ( self : Dict , snake_case__ : Dict ):
"""simple docstring"""
if item in self.set:
self.set.remove(snake_case__ )
SCREAMING_SNAKE_CASE = []
((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) = heapq.heappop(self.elements )
while x != item:
temp.append((pro, x) )
((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) = heapq.heappop(self.elements )
for prito, yyy in temp:
heapq.heappush(self.elements , (prito, yyy) )
def UpperCamelCase ( self : str ):
"""simple docstring"""
return self.elements[0][1]
def UpperCamelCase ( self : Tuple ):
"""simple docstring"""
((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) = heapq.heappop(self.elements )
self.set.remove(snake_case__ )
return (priority, item)
def __lowerCAmelCase ( _UpperCamelCase : TPos , _UpperCamelCase : TPos ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = np.array(_UpperCamelCase )
SCREAMING_SNAKE_CASE = np.array(_UpperCamelCase )
return np.linalg.norm(a - b )
def __lowerCAmelCase ( _UpperCamelCase : TPos , _UpperCamelCase : TPos ) -> Dict:
'''simple docstring'''
return consistent_heuristic(_UpperCamelCase , _UpperCamelCase ) // t
def __lowerCAmelCase ( _UpperCamelCase : TPos , _UpperCamelCase : TPos ) -> Optional[int]:
'''simple docstring'''
return abs(p[0] - goal[0] ) + abs(p[1] - goal[1] )
def __lowerCAmelCase ( _UpperCamelCase : TPos , _UpperCamelCase : int , _UpperCamelCase : TPos , _UpperCamelCase : dict[TPos, float] ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = g_function[start] + Wa * heuristics[i](_UpperCamelCase , _UpperCamelCase )
return ans
def __lowerCAmelCase ( _UpperCamelCase : List[str] , _UpperCamelCase : int , _UpperCamelCase : Tuple ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE = np.chararray((n, n) )
for i in range(_UpperCamelCase ):
for j in range(_UpperCamelCase ):
SCREAMING_SNAKE_CASE = '*'
for i in range(_UpperCamelCase ):
for j in range(_UpperCamelCase ):
if (j, (n - 1) - i) in blocks:
SCREAMING_SNAKE_CASE = '#'
SCREAMING_SNAKE_CASE = '-'
SCREAMING_SNAKE_CASE = back_pointer[goal]
while x != start:
((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) = x
# print(x)
SCREAMING_SNAKE_CASE = '-'
SCREAMING_SNAKE_CASE = back_pointer[x]
SCREAMING_SNAKE_CASE = '-'
for i in range(_UpperCamelCase ):
for j in range(_UpperCamelCase ):
if (i, j) == (0, n - 1):
print(grid[i][j] , end=' ' )
print('<-- End position' , end=' ' )
else:
print(grid[i][j] , end=' ' )
print()
print('^' )
print('Start position' )
print()
print('# is an obstacle' )
print('- is the path taken by algorithm' )
print('PATH TAKEN BY THE ALGORITHM IS:-' )
SCREAMING_SNAKE_CASE = back_pointer[goal]
while x != start:
print(_UpperCamelCase , end=' ' )
SCREAMING_SNAKE_CASE = back_pointer[x]
print(_UpperCamelCase )
sys.exit()
def __lowerCAmelCase ( _UpperCamelCase : TPos ) -> Any:
'''simple docstring'''
if p[0] < 0 or p[0] > n - 1:
return False
if p[1] < 0 or p[1] > n - 1:
return False
return True
def __lowerCAmelCase ( _UpperCamelCase : List[Any] , _UpperCamelCase : Tuple , _UpperCamelCase : Any , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Tuple , _UpperCamelCase : List[Any] , _UpperCamelCase : Optional[Any] , ) -> List[Any]:
'''simple docstring'''
for itera in range(_UpperCamelCase ):
open_list[itera].remove_element(_UpperCamelCase )
# print("s", s)
# print("j", j)
((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) = s
SCREAMING_SNAKE_CASE = (x - 1, y)
SCREAMING_SNAKE_CASE = (x + 1, y)
SCREAMING_SNAKE_CASE = (x, y + 1)
SCREAMING_SNAKE_CASE = (x, y - 1)
for neighbours in [left, right, up, down]:
if neighbours not in blocks:
if valid(_UpperCamelCase ) and neighbours not in visited:
# print("neighbour", neighbours)
visited.add(_UpperCamelCase )
SCREAMING_SNAKE_CASE = -1
SCREAMING_SNAKE_CASE = float('inf' )
if valid(_UpperCamelCase ) and g_function[neighbours] > g_function[s] + 1:
SCREAMING_SNAKE_CASE = g_function[s] + 1
SCREAMING_SNAKE_CASE = s
if neighbours not in close_list_anchor:
open_list[0].put(_UpperCamelCase , key(_UpperCamelCase , 0 , _UpperCamelCase , _UpperCamelCase ) )
if neighbours not in close_list_inad:
for var in range(1 , _UpperCamelCase ):
if key(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) <= Wa * key(
_UpperCamelCase , 0 , _UpperCamelCase , _UpperCamelCase ):
open_list[j].put(
_UpperCamelCase , key(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) )
def __lowerCAmelCase ( ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE = []
for x in range(1 , 5 ):
for y in range(1 , 6 ):
some_list.append((x, y) )
for x in range(15 , 20 ):
some_list.append((x, 17) )
for x in range(10 , 19 ):
for y in range(1 , 15 ):
some_list.append((x, y) )
# L block
for x in range(1 , 4 ):
for y in range(12 , 19 ):
some_list.append((x, y) )
for x in range(3 , 13 ):
for y in range(16 , 19 ):
some_list.append((x, y) )
return some_list
a_ : str = {0: consistent_heuristic, 1: heuristic_a, 2: heuristic_a}
a_ : List[str] = [
(0, 1),
(1, 1),
(2, 1),
(3, 1),
(4, 1),
(5, 1),
(6, 1),
(7, 1),
(8, 1),
(9, 1),
(10, 1),
(11, 1),
(12, 1),
(13, 1),
(14, 1),
(15, 1),
(16, 1),
(17, 1),
(18, 1),
(19, 1),
]
a_ : Union[str, Any] = make_common_ground()
a_ : Tuple = blocks_blk
# hyper parameters
a_ : Any = 1
a_ : List[str] = 1
a_ : Union[str, Any] = 20
a_ : Optional[Any] = 3 # one consistent and two other inconsistent
# start and end destination
a_ : int = (0, 0)
a_ : Optional[int] = (n - 1, n - 1)
a_ : Union[str, Any] = 1
def __lowerCAmelCase ( _UpperCamelCase : TPos , _UpperCamelCase : TPos , _UpperCamelCase : int ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = {start: 0, goal: float('inf' )}
SCREAMING_SNAKE_CASE = {start: -1, goal: -1}
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = set()
for i in range(_UpperCamelCase ):
open_list.append(PriorityQueue() )
open_list[i].put(_UpperCamelCase , key(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) )
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = []
while open_list[0].minkey() < float('inf' ):
for i in range(1 , _UpperCamelCase ):
# print(open_list[0].minkey(), open_list[i].minkey())
if open_list[i].minkey() <= Wa * open_list[0].minkey():
global t
t += 1
if g_function[goal] <= open_list[i].minkey():
if g_function[goal] < float('inf' ):
do_something(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
else:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = open_list[i].top_show()
visited.add(_UpperCamelCase )
expand_state(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , )
close_list_inad.append(_UpperCamelCase )
else:
if g_function[goal] <= open_list[0].minkey():
if g_function[goal] < float('inf' ):
do_something(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
else:
SCREAMING_SNAKE_CASE = open_list[0].top_show()
visited.add(_UpperCamelCase )
expand_state(
_UpperCamelCase , 0 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , )
close_list_anchor.append(_UpperCamelCase )
print('No path found to goal' )
print()
for i in range(n - 1 , -1 , -1 ):
for j in range(_UpperCamelCase ):
if (j, i) in blocks:
print('#' , end=' ' )
elif (j, i) in back_pointer:
if (j, i) == (n - 1, n - 1):
print('*' , end=' ' )
else:
print('-' , end=' ' )
else:
print('*' , end=' ' )
if (j, i) == (n - 1, n - 1):
print('<-- End position' , end=' ' )
print()
print('^' )
print('Start position' )
print()
print('# is an obstacle' )
print('- is the path taken by algorithm' )
if __name__ == "__main__":
multi_a_star(start, goal, n_heuristic)
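# Illustrative sketch (using the globals above: start=(0, 0), goal=(19, 19),
# t=1; note the two mangled `heuristic_a` definitions shadow each other --
# in the intended code they are Euclidean // t and Manhattan respectively):
# for p = (5, 5),
#   consistent_heuristic(p, goal) ~= 19.80  (Euclidean distance)
#   Euclidean // t                == 19.0   (t grows by one per expansion)
#   Manhattan                     == 28     (|5-19| + |5-19|)
# An inadmissible queue (index >= 1) is only expanded while its minimum key
# stays within Wa times the anchor queue's minimum key.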
| 673 | 1 |
def __lowerCAmelCase ( _UpperCamelCase : List[str] ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = len(_UpperCamelCase )
SCREAMING_SNAKE_CASE = sum(_UpperCamelCase )
SCREAMING_SNAKE_CASE = [[False for x in range(s + 1 )] for y in range(n + 1 )]
for i in range(1 , n + 1 ):
SCREAMING_SNAKE_CASE = True
for i in range(1 , s + 1 ):
SCREAMING_SNAKE_CASE = False
for i in range(1 , n + 1 ):
for j in range(1 , s + 1 ):
SCREAMING_SNAKE_CASE = dp[i][j - 1]
if arr[i - 1] <= j:
SCREAMING_SNAKE_CASE = dp[i][j] or dp[i - 1][j - arr[i - 1]]
for j in range(int(s / 2 ) , -1 , -1 ):
if dp[n][j] is True:
SCREAMING_SNAKE_CASE = s - 2 * j
break
return diff
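# Usage sketch (illustrative; assumes the intended, de-obfuscated dp table):
# the function returns the minimum difference achievable when splitting the
# input into two subsets. For [1, 6, 11, 5], the total is s=23 and the
# largest reachable subset sum j <= s//2 is 11 (e.g. {6, 5}), so the result
# is 23 - 2*11 = 1 -- the best split being {1, 5, 6} vs {11}.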
| 673 |
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
a_ : Tuple = logging.get_logger(__name__) # pylint: disable=invalid-name
a_ : str = "\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline\n >>> from diffusers.utils import load_image\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16\n ... )\n >>> pipe_prior.to(\"cuda\")\n\n >>> prompt = \"A red cartoon frog, 4k\"\n >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)\n\n >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(\n ... \"kandinsky-community/kandinsky-2-2-decoder\", torch_dtype=torch.float16\n ... )\n >>> pipe.to(\"cuda\")\n\n >>> init_image = load_image(\n ... \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"\n ... \"/kandinsky/frog.png\"\n ... )\n\n >>> image = pipe(\n ... image=init_image,\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... strength=0.2,\n ... ).images\n\n >>> image[0].save(\"red_frog.png\")\n ```\n"
def __lowerCAmelCase ( _UpperCamelCase : List[str] , _UpperCamelCase : Any , _UpperCamelCase : Any=8 ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
SCREAMING_SNAKE_CASE = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
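# Example (illustrative; the call site below uses the original name):
# downscale_height_and_width(512, 768, 8) returns (64, 96) -- each
# dimension is divided by scale_factor**2, rounded up, then multiplied back
# by scale_factor, so the result is always a multiple of the factor.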
def __lowerCAmelCase ( _UpperCamelCase : Optional[int] , _UpperCamelCase : List[Any]=5_12 , _UpperCamelCase : Union[str, Any]=5_12 ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = pil_image.resize((w, h) , resample=Image.BICUBIC , reducing_gap=1 )
SCREAMING_SNAKE_CASE = np.array(pil_image.convert('RGB' ) )
SCREAMING_SNAKE_CASE = arr.astype(np.floataa ) / 1_27.5 - 1
SCREAMING_SNAKE_CASE = np.transpose(_UpperCamelCase , [2, 0, 1] )
SCREAMING_SNAKE_CASE = torch.from_numpy(_UpperCamelCase ).unsqueeze(0 )
return image
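# Note (illustrative): prepare_image -- invoked under that name at the call
# site below -- maps uint8 pixels [0, 255] linearly to floats in
# [-1.0, 1.0] (0 -> -1.0, 255 -> 1.0), reorders HWC -> CHW, and adds a
# batch dimension, matching the layout the VQ encoder expects here.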
class UpperCamelCase ( SCREAMING_SNAKE_CASE ):
def __init__( self : int , snake_case__ : UNetaDConditionModel , snake_case__ : DDPMScheduler , snake_case__ : VQModel , ):
"""simple docstring"""
super().__init__()
self.register_modules(
unet=snake_case__ , scheduler=snake_case__ , movq=snake_case__ , )
SCREAMING_SNAKE_CASE = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def UpperCamelCase ( self : Any , snake_case__ : int , snake_case__ : List[Any] , snake_case__ : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = min(int(num_inference_steps * strength ) , snake_case__ )
SCREAMING_SNAKE_CASE = max(num_inference_steps - init_timestep , 0 )
SCREAMING_SNAKE_CASE = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
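    # Example (illustrative): with num_inference_steps=100 and strength=0.2,
    # init_timestep = min(int(100 * 0.2), 100) = 20 and t_start = 80, so only
    # the last 20 scheduler timesteps are run -- strength controls how much of
    # the denoising trajectory this img2img pass re-traverses.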
def UpperCamelCase ( self : List[str] , snake_case__ : Dict , snake_case__ : int , snake_case__ : Optional[int] , snake_case__ : Optional[Any] , snake_case__ : Union[str, Any] , snake_case__ : Optional[int] , snake_case__ : str=None ):
"""simple docstring"""
if not isinstance(snake_case__ , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
F"""`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(snake_case__ )}""" )
SCREAMING_SNAKE_CASE = image.to(device=snake_case__ , dtype=snake_case__ )
SCREAMING_SNAKE_CASE = batch_size * num_images_per_prompt
if image.shape[1] == 4:
SCREAMING_SNAKE_CASE = image
else:
if isinstance(snake_case__ , snake_case__ ) and len(snake_case__ ) != batch_size:
raise ValueError(
F"""You have passed a list of generators of length {len(snake_case__ )}, but requested an effective batch"""
F""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
elif isinstance(snake_case__ , snake_case__ ):
SCREAMING_SNAKE_CASE = [
self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(snake_case__ )
]
SCREAMING_SNAKE_CASE = torch.cat(snake_case__ , dim=0 )
else:
SCREAMING_SNAKE_CASE = self.movq.encode(snake_case__ ).latent_dist.sample(snake_case__ )
SCREAMING_SNAKE_CASE = self.movq.config.scaling_factor * init_latents
SCREAMING_SNAKE_CASE = torch.cat([init_latents] , dim=0 )
SCREAMING_SNAKE_CASE = init_latents.shape
SCREAMING_SNAKE_CASE = randn_tensor(snake_case__ , generator=snake_case__ , device=snake_case__ , dtype=snake_case__ )
# get latents
SCREAMING_SNAKE_CASE = self.scheduler.add_noise(snake_case__ , snake_case__ , snake_case__ )
SCREAMING_SNAKE_CASE = init_latents
return latents
def UpperCamelCase ( self : int , snake_case__ : List[str]=0 ):
"""simple docstring"""
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`' )
SCREAMING_SNAKE_CASE = torch.device(F"""cuda:{gpu_id}""" )
SCREAMING_SNAKE_CASE = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(snake_case__ , snake_case__ )
def UpperCamelCase ( self : Union[str, Any] , snake_case__ : Optional[int]=0 ):
"""simple docstring"""
if is_accelerate_available() and is_accelerate_version('>=' , '0.17.0.dev0' ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.' )
SCREAMING_SNAKE_CASE = torch.device(F"""cuda:{gpu_id}""" )
if self.device.type != "cpu":
self.to('cpu' , silence_dtype_warnings=snake_case__ )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
SCREAMING_SNAKE_CASE = None
for cpu_offloaded_model in [self.unet, self.movq]:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = cpu_offload_with_hook(snake_case__ , snake_case__ , prev_module_hook=snake_case__ )
# We'll offload the last model manually.
SCREAMING_SNAKE_CASE = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def UpperCamelCase ( self : Dict ):
"""simple docstring"""
if not hasattr(self.unet , '_hf_hook' ):
return self.device
for module in self.unet.modules():
if (
hasattr(snake_case__ , '_hf_hook' )
and hasattr(module._hf_hook , 'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(snake_case__ )
def __call__( self : str , snake_case__ : Union[torch.FloatTensor, List[torch.FloatTensor]] , snake_case__ : Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]] , snake_case__ : Union[torch.FloatTensor, List[torch.FloatTensor]] , snake_case__ : int = 5_1_2 , snake_case__ : int = 5_1_2 , snake_case__ : int = 1_0_0 , snake_case__ : float = 4.0 , snake_case__ : float = 0.3 , snake_case__ : int = 1 , snake_case__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , snake_case__ : Optional[str] = "pil" , snake_case__ : bool = True , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self._execution_device
SCREAMING_SNAKE_CASE = guidance_scale > 1.0
if isinstance(snake_case__ , snake_case__ ):
SCREAMING_SNAKE_CASE = torch.cat(snake_case__ , dim=0 )
SCREAMING_SNAKE_CASE = image_embeds.shape[0]
if isinstance(snake_case__ , snake_case__ ):
SCREAMING_SNAKE_CASE = torch.cat(snake_case__ , dim=0 )
if do_classifier_free_guidance:
SCREAMING_SNAKE_CASE = image_embeds.repeat_interleave(snake_case__ , dim=0 )
SCREAMING_SNAKE_CASE = negative_image_embeds.repeat_interleave(snake_case__ , dim=0 )
SCREAMING_SNAKE_CASE = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=snake_case__ )
if not isinstance(snake_case__ , snake_case__ ):
SCREAMING_SNAKE_CASE = [image]
if not all(isinstance(snake_case__ , (PIL.Image.Image, torch.Tensor) ) for i in image ):
raise ValueError(
F"""Input is in incorrect format: {[type(snake_case__ ) for i in image]}. Currently, we only support PIL image and pytorch tensor""" )
SCREAMING_SNAKE_CASE = torch.cat([prepare_image(snake_case__ , snake_case__ , snake_case__ ) for i in image] , dim=0 )
SCREAMING_SNAKE_CASE = image.to(dtype=image_embeds.dtype , device=snake_case__ )
SCREAMING_SNAKE_CASE = self.movq.encode(snake_case__ )['latents']
SCREAMING_SNAKE_CASE = latents.repeat_interleave(snake_case__ , dim=0 )
self.scheduler.set_timesteps(snake_case__ , device=snake_case__ )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.get_timesteps(snake_case__ , snake_case__ , snake_case__ )
SCREAMING_SNAKE_CASE = timesteps[:1].repeat(batch_size * num_images_per_prompt )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = downscale_height_and_width(snake_case__ , snake_case__ , self.movq_scale_factor )
SCREAMING_SNAKE_CASE = self.prepare_latents(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , image_embeds.dtype , snake_case__ , snake_case__ )
for i, t in enumerate(self.progress_bar(snake_case__ ) ):
# expand the latents if we are doing classifier free guidance
SCREAMING_SNAKE_CASE = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
SCREAMING_SNAKE_CASE = {'image_embeds': image_embeds}
SCREAMING_SNAKE_CASE = self.unet(
sample=snake_case__ , timestep=snake_case__ , encoder_hidden_states=snake_case__ , added_cond_kwargs=snake_case__ , return_dict=snake_case__ , )[0]
if do_classifier_free_guidance:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = noise_pred.split(latents.shape[1] , dim=1 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = noise_pred.chunk(2 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = variance_pred.chunk(2 )
SCREAMING_SNAKE_CASE = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
SCREAMING_SNAKE_CASE = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , 'variance_type' )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
SCREAMING_SNAKE_CASE = self.scheduler.step(
snake_case__ , snake_case__ , snake_case__ , generator=snake_case__ , )[0]
# post-processing
SCREAMING_SNAKE_CASE = self.movq.decode(snake_case__ , force_not_quantize=snake_case__ )['sample']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" )
if output_type in ["np", "pil"]:
SCREAMING_SNAKE_CASE = image * 0.5 + 0.5
SCREAMING_SNAKE_CASE = image.clamp(0 , 1 )
SCREAMING_SNAKE_CASE = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
SCREAMING_SNAKE_CASE = self.numpy_to_pil(snake_case__ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=snake_case__ )
| 673 | 1 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
GPTaTokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class UpperCamelCase ( unittest.TestCase ):
def UpperCamelCase ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE = BlipImageProcessor()
SCREAMING_SNAKE_CASE = GPTaTokenizer.from_pretrained('hf-internal-testing/tiny-random-GPT2Model' )
SCREAMING_SNAKE_CASE = BertTokenizerFast.from_pretrained('hf-internal-testing/tiny-random-bert' )
SCREAMING_SNAKE_CASE = InstructBlipProcessor(snake_case__ , snake_case__ , snake_case__ )
processor.save_pretrained(self.tmpdirname )
def UpperCamelCase ( self : Dict , **snake_case__ : Optional[Any] ):
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **snake_case__ ).tokenizer
def UpperCamelCase ( self : Dict , **snake_case__ : int ):
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **snake_case__ ).image_processor
def UpperCamelCase ( self : List[Any] , **snake_case__ : Optional[Any] ):
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **snake_case__ ).qformer_tokenizer
def UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )]
SCREAMING_SNAKE_CASE = [Image.fromarray(np.moveaxis(snake_case__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def UpperCamelCase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = InstructBlipProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() , qformer_tokenizer=self.get_qformer_tokenizer() , )
processor.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
SCREAMING_SNAKE_CASE = self.get_image_processor(do_normalize=snake_case__ , padding_value=1.0 )
SCREAMING_SNAKE_CASE = InstructBlipProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=snake_case__ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , snake_case__ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , snake_case__ )
self.assertIsInstance(processor.qformer_tokenizer , snake_case__ )
def UpperCamelCase ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.get_image_processor()
SCREAMING_SNAKE_CASE = self.get_tokenizer()
SCREAMING_SNAKE_CASE = self.get_qformer_tokenizer()
SCREAMING_SNAKE_CASE = InstructBlipProcessor(
tokenizer=snake_case__ , image_processor=snake_case__ , qformer_tokenizer=snake_case__ )
SCREAMING_SNAKE_CASE = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE = image_processor(snake_case__ , return_tensors='np' )
SCREAMING_SNAKE_CASE = processor(images=snake_case__ , return_tensors='np' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def UpperCamelCase ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.get_image_processor()
SCREAMING_SNAKE_CASE = self.get_tokenizer()
SCREAMING_SNAKE_CASE = self.get_qformer_tokenizer()
SCREAMING_SNAKE_CASE = InstructBlipProcessor(
tokenizer=snake_case__ , image_processor=snake_case__ , qformer_tokenizer=snake_case__ )
SCREAMING_SNAKE_CASE = 'lower newer'
SCREAMING_SNAKE_CASE = processor(text=snake_case__ )
SCREAMING_SNAKE_CASE = tokenizer(snake_case__ , return_token_type_ids=snake_case__ )
SCREAMING_SNAKE_CASE = qformer_tokenizer(snake_case__ , return_token_type_ids=snake_case__ )
for key in encoded_tokens.keys():
self.assertListEqual(encoded_tokens[key] , encoded_processor[key] )
for key in encoded_tokens_qformer.keys():
self.assertListEqual(encoded_tokens_qformer[key] , encoded_processor['qformer_' + key] )
def UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.get_image_processor()
SCREAMING_SNAKE_CASE = self.get_tokenizer()
SCREAMING_SNAKE_CASE = self.get_qformer_tokenizer()
SCREAMING_SNAKE_CASE = InstructBlipProcessor(
tokenizer=snake_case__ , image_processor=snake_case__ , qformer_tokenizer=snake_case__ )
SCREAMING_SNAKE_CASE = 'lower newer'
SCREAMING_SNAKE_CASE = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE = processor(text=snake_case__ , images=snake_case__ )
self.assertListEqual(
list(inputs.keys() ) , ['input_ids', 'attention_mask', 'qformer_input_ids', 'qformer_attention_mask', 'pixel_values'] , )
# test if it raises when no input is passed
with pytest.raises(snake_case__ ):
processor()
def UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.get_image_processor()
SCREAMING_SNAKE_CASE = self.get_tokenizer()
SCREAMING_SNAKE_CASE = self.get_qformer_tokenizer()
SCREAMING_SNAKE_CASE = InstructBlipProcessor(
tokenizer=snake_case__ , image_processor=snake_case__ , qformer_tokenizer=snake_case__ )
SCREAMING_SNAKE_CASE = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
SCREAMING_SNAKE_CASE = processor.batch_decode(snake_case__ )
SCREAMING_SNAKE_CASE = tokenizer.batch_decode(snake_case__ )
self.assertListEqual(snake_case__ , snake_case__ )
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.get_image_processor()
SCREAMING_SNAKE_CASE = self.get_tokenizer()
SCREAMING_SNAKE_CASE = self.get_qformer_tokenizer()
SCREAMING_SNAKE_CASE = InstructBlipProcessor(
tokenizer=snake_case__ , image_processor=snake_case__ , qformer_tokenizer=snake_case__ )
SCREAMING_SNAKE_CASE = 'lower newer'
SCREAMING_SNAKE_CASE = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE = processor(text=snake_case__ , images=snake_case__ )
self.assertListEqual(
list(inputs.keys() ) , ['input_ids', 'attention_mask', 'qformer_input_ids', 'qformer_attention_mask', 'pixel_values'] , )
| 673 |
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
a_ : List[Any] = logging.get_logger("transformers.models.speecht5")
def __lowerCAmelCase ( _UpperCamelCase : Tuple , _UpperCamelCase : Tuple , _UpperCamelCase : Any ) -> Dict:
'''simple docstring'''
hf_model.apply_weight_norm()
SCREAMING_SNAKE_CASE = checkpoint['input_conv.weight_g']
SCREAMING_SNAKE_CASE = checkpoint['input_conv.weight_v']
SCREAMING_SNAKE_CASE = checkpoint['input_conv.bias']
for i in range(len(config.upsample_rates ) ):
SCREAMING_SNAKE_CASE = checkpoint[f"""upsamples.{i}.1.weight_g"""]
SCREAMING_SNAKE_CASE = checkpoint[f"""upsamples.{i}.1.weight_v"""]
SCREAMING_SNAKE_CASE = checkpoint[f"""upsamples.{i}.1.bias"""]
for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
for j in range(len(config.resblock_dilation_sizes ) ):
SCREAMING_SNAKE_CASE = checkpoint[f"""blocks.{i}.convs1.{j}.1.weight_g"""]
SCREAMING_SNAKE_CASE = checkpoint[f"""blocks.{i}.convs1.{j}.1.weight_v"""]
SCREAMING_SNAKE_CASE = checkpoint[f"""blocks.{i}.convs1.{j}.1.bias"""]
SCREAMING_SNAKE_CASE = checkpoint[f"""blocks.{i}.convs2.{j}.1.weight_g"""]
SCREAMING_SNAKE_CASE = checkpoint[f"""blocks.{i}.convs2.{j}.1.weight_v"""]
SCREAMING_SNAKE_CASE = checkpoint[f"""blocks.{i}.convs2.{j}.1.bias"""]
SCREAMING_SNAKE_CASE = checkpoint['output_conv.1.weight_g']
SCREAMING_SNAKE_CASE = checkpoint['output_conv.1.weight_v']
SCREAMING_SNAKE_CASE = checkpoint['output_conv.1.bias']
hf_model.remove_weight_norm()
@torch.no_grad()
def __lowerCAmelCase ( _UpperCamelCase : Optional[int] , _UpperCamelCase : Dict , _UpperCamelCase : Tuple , _UpperCamelCase : Any=None , _UpperCamelCase : List[str]=None , ) -> Tuple:
'''simple docstring'''
if config_path is not None:
SCREAMING_SNAKE_CASE = SpeechTaHifiGanConfig.from_pretrained(_UpperCamelCase )
else:
SCREAMING_SNAKE_CASE = SpeechTaHifiGanConfig()
SCREAMING_SNAKE_CASE = SpeechTaHifiGan(_UpperCamelCase )
SCREAMING_SNAKE_CASE = torch.load(_UpperCamelCase )
load_weights(orig_checkpoint['model']['generator'] , _UpperCamelCase , _UpperCamelCase )
SCREAMING_SNAKE_CASE = np.load(_UpperCamelCase )
SCREAMING_SNAKE_CASE = stats[0].reshape(-1 )
SCREAMING_SNAKE_CASE = stats[1].reshape(-1 )
SCREAMING_SNAKE_CASE = torch.from_numpy(_UpperCamelCase ).float()
SCREAMING_SNAKE_CASE = torch.from_numpy(_UpperCamelCase ).float()
model.save_pretrained(_UpperCamelCase )
if repo_id:
print('Pushing to the hub...' )
model.push_to_hub(_UpperCamelCase )
if __name__ == "__main__":
a_ : Dict = argparse.ArgumentParser()
parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
parser.add_argument("--stats_path", required=True, default=None, type=str, help="Path to stats.npy file")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
a_ : Tuple = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
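# Example invocation (a sketch: the script name and file paths are
# placeholders; only the flags match the parser above):
#   python convert_hifigan_checkpoint.py \
#       --checkpoint_path generator.ckpt \
#       --stats_path stats.npy \
#       --config_path config.json \
#       --pytorch_dump_folder_path ./speecht5_hifigan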
| 673 | 1 |
from pickle import UnpicklingError
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict
from ..utils import logging
a_ : List[str] = logging.get_logger(__name__)
def __lowerCAmelCase ( _UpperCamelCase : Dict , _UpperCamelCase : List[Any] ) -> Dict:
'''simple docstring'''
try:
with open(_UpperCamelCase , 'rb' ) as flax_state_f:
SCREAMING_SNAKE_CASE = from_bytes(_UpperCamelCase , flax_state_f.read() )
except UnpicklingError as e:
try:
with open(_UpperCamelCase ) as f:
if f.read().startswith('version' ):
raise OSError(
'You seem to have cloned a repository without having git-lfs installed. Please'
' install git-lfs and run `git lfs install` followed by `git lfs pull` in the'
' folder you cloned.' )
else:
raise ValueError from e
except (UnicodeDecodeError, ValueError):
raise EnvironmentError(f"""Unable to convert {model_file} to Flax deserializable object. """ )
return load_flax_weights_in_pytorch_model(_UpperCamelCase , _UpperCamelCase )
def __lowerCAmelCase ( _UpperCamelCase : Union[str, Any] , _UpperCamelCase : List[Any] ) -> List[Any]:
'''simple docstring'''
try:
import torch # noqa: F401
except ImportError:
logger.error(
'Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see'
' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'
' instructions.' )
raise
# check if we have bf16 weights
    SCREAMING_SNAKE_CASE = flatten_dict(jax.tree_util.tree_map(lambda x : x.dtype == jnp.bfloataa , _UpperCamelCase ) ).values()
if any(_UpperCamelCase ):
# convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16
# and bf16 is not fully supported in PT yet.
logger.warning(
'Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` '
'before loading those in PyTorch model.' )
SCREAMING_SNAKE_CASE = jax.tree_util.tree_map(
            lambda params : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , _UpperCamelCase )
SCREAMING_SNAKE_CASE = ''
SCREAMING_SNAKE_CASE = flatten_dict(_UpperCamelCase , sep='.' )
SCREAMING_SNAKE_CASE = pt_model.state_dict()
# keep track of unexpected & missing keys
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = set(pt_model_dict.keys() )
for flax_key_tuple, flax_tensor in flax_state_dict.items():
SCREAMING_SNAKE_CASE = flax_key_tuple.split('.' )
if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4:
SCREAMING_SNAKE_CASE = flax_key_tuple_array[:-1] + ['weight']
SCREAMING_SNAKE_CASE = jnp.transpose(_UpperCamelCase , (3, 2, 0, 1) )
elif flax_key_tuple_array[-1] == "kernel":
SCREAMING_SNAKE_CASE = flax_key_tuple_array[:-1] + ['weight']
SCREAMING_SNAKE_CASE = flax_tensor.T
elif flax_key_tuple_array[-1] == "scale":
SCREAMING_SNAKE_CASE = flax_key_tuple_array[:-1] + ['weight']
if "time_embedding" not in flax_key_tuple_array:
for i, flax_key_tuple_string in enumerate(_UpperCamelCase ):
SCREAMING_SNAKE_CASE = (
flax_key_tuple_string.replace('_0' , '.0' )
.replace('_1' , '.1' )
.replace('_2' , '.2' )
.replace('_3' , '.3' )
.replace('_4' , '.4' )
.replace('_5' , '.5' )
.replace('_6' , '.6' )
.replace('_7' , '.7' )
.replace('_8' , '.8' )
.replace('_9' , '.9' )
)
SCREAMING_SNAKE_CASE = '.'.join(_UpperCamelCase )
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
f"""Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected """
f"""to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.""" )
else:
# add weight to pytorch dict
SCREAMING_SNAKE_CASE = np.asarray(_UpperCamelCase ) if not isinstance(_UpperCamelCase , np.ndarray ) else flax_tensor
SCREAMING_SNAKE_CASE = torch.from_numpy(_UpperCamelCase )
# remove from missing keys
missing_keys.remove(_UpperCamelCase )
else:
# weight is not expected by PyTorch model
unexpected_keys.append(_UpperCamelCase )
pt_model.load_state_dict(_UpperCamelCase )
# re-transform missing_keys to list
SCREAMING_SNAKE_CASE = list(_UpperCamelCase )
if len(_UpperCamelCase ) > 0:
logger.warning(
'Some weights of the Flax model were not used when initializing the PyTorch model'
f""" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"""
f""" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"""
' (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This'
f""" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"""
' to be exactly identical (e.g. initializing a BertForSequenceClassification model from a'
' FlaxBertForSequenceClassification model).' )
if len(_UpperCamelCase ) > 0:
logger.warning(
f"""Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"""
f""" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"""
' use it for predictions and inference.' )
return pt_model
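# Shape-handling sketch (illustrative): a 4-D Flax Conv "kernel" stored as
# (kh, kw, in_ch, out_ch) becomes a PyTorch "weight" of shape
# (out_ch, in_ch, kh, kw) via the (3, 2, 0, 1) transpose above; a Dense
# "kernel" of shape (in, out) is simply transposed to (out, in); a norm
# "scale" is renamed to "weight" with no reshaping.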
| 673 |
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
a_ : Optional[int] = logging.get_logger(__name__)
a_ : Union[str, Any] = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
a_ : Any = {
"vocab_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
},
"merges_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
},
"tokenizer_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
},
}
a_ : List[Any] = {
"allenai/led-base-16384": 1_6384,
}
class UpperCamelCase ( SCREAMING_SNAKE_CASE ):
__UpperCamelCase =VOCAB_FILES_NAMES
__UpperCamelCase =PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase =LEDTokenizer
__UpperCamelCase =["input_ids", "attention_mask"]
def __init__( self : Tuple , snake_case__ : List[Any]=None , snake_case__ : List[str]=None , snake_case__ : List[str]=None , snake_case__ : Dict="replace" , snake_case__ : Tuple="<s>" , snake_case__ : Optional[Any]="</s>" , snake_case__ : int="</s>" , snake_case__ : Dict="<s>" , snake_case__ : Union[str, Any]="<unk>" , snake_case__ : Optional[int]="<pad>" , snake_case__ : List[str]="<mask>" , snake_case__ : List[Any]=False , snake_case__ : int=True , **snake_case__ : Dict , ):
"""simple docstring"""
super().__init__(
snake_case__ , snake_case__ , tokenizer_file=snake_case__ , errors=snake_case__ , bos_token=snake_case__ , eos_token=snake_case__ , sep_token=snake_case__ , cls_token=snake_case__ , unk_token=snake_case__ , pad_token=snake_case__ , mask_token=snake_case__ , add_prefix_space=snake_case__ , trim_offsets=snake_case__ , **snake_case__ , )
SCREAMING_SNAKE_CASE = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space' , snake_case__ ) != add_prefix_space:
SCREAMING_SNAKE_CASE = getattr(snake_case__ , pre_tok_state.pop('type' ) )
SCREAMING_SNAKE_CASE = add_prefix_space
SCREAMING_SNAKE_CASE = pre_tok_class(**snake_case__ )
SCREAMING_SNAKE_CASE = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
SCREAMING_SNAKE_CASE = 'post_processor'
SCREAMING_SNAKE_CASE = getattr(self.backend_tokenizer , snake_case__ , snake_case__ )
if tokenizer_component_instance:
SCREAMING_SNAKE_CASE = json.loads(tokenizer_component_instance.__getstate__() )
        # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
if "sep" in state:
SCREAMING_SNAKE_CASE = tuple(state['sep'] )
if "cls" in state:
SCREAMING_SNAKE_CASE = tuple(state['cls'] )
SCREAMING_SNAKE_CASE = False
if state.get('add_prefix_space' , snake_case__ ) != add_prefix_space:
SCREAMING_SNAKE_CASE = add_prefix_space
SCREAMING_SNAKE_CASE = True
if state.get('trim_offsets' , snake_case__ ) != trim_offsets:
SCREAMING_SNAKE_CASE = trim_offsets
SCREAMING_SNAKE_CASE = True
if changes_to_apply:
SCREAMING_SNAKE_CASE = getattr(snake_case__ , state.pop('type' ) )
SCREAMING_SNAKE_CASE = component_class(**snake_case__ )
setattr(self.backend_tokenizer , snake_case__ , snake_case__ )
@property
# Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
if self._mask_token is None:
if self.verbose:
logger.error('Using mask_token, but it is not set yet.' )
return None
return str(self._mask_token )
@mask_token.setter
def UpperCamelCase ( self : List[Any] , snake_case__ : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else value
SCREAMING_SNAKE_CASE = value
def UpperCamelCase ( self : Dict , *snake_case__ : Optional[Any] , **snake_case__ : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = kwargs.get('is_split_into_words' , snake_case__ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
'to use it with pretokenized inputs.' )
return super()._batch_encode_plus(*snake_case__ , **snake_case__ )
def UpperCamelCase ( self : List[str] , *snake_case__ : List[Any] , **snake_case__ : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = kwargs.get('is_split_into_words' , snake_case__ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
'to use it with pretokenized inputs.' )
return super()._encode_plus(*snake_case__ , **snake_case__ )
def UpperCamelCase ( self : Union[str, Any] , snake_case__ : str , snake_case__ : Optional[str] = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self._tokenizer.model.save(snake_case__ , name=snake_case__ )
return tuple(snake_case__ )
def UpperCamelCase ( self : List[str] , snake_case__ : int , snake_case__ : Tuple=None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def UpperCamelCase ( self : Optional[int] , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = [self.sep_token_id]
SCREAMING_SNAKE_CASE = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def UpperCamelCase ( self : Optional[Any] , snake_case__ : Union[Dict[str, EncodedInput], BatchEncoding] , snake_case__ : Optional[int] = None , snake_case__ : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , snake_case__ : Optional[int] = None , snake_case__ : Optional[bool] = None , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = super()._pad(
encoded_inputs=snake_case__ , max_length=snake_case__ , padding_strategy=snake_case__ , pad_to_multiple_of=snake_case__ , return_attention_mask=snake_case__ , )
# Load from model defaults
if return_attention_mask is None:
SCREAMING_SNAKE_CASE = 'attention_mask' in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
SCREAMING_SNAKE_CASE = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
SCREAMING_SNAKE_CASE = len(encoded_inputs['global_attention_mask'] ) != len(snake_case__ )
if needs_to_be_padded:
SCREAMING_SNAKE_CASE = len(snake_case__ ) - len(encoded_inputs['global_attention_mask'] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
SCREAMING_SNAKE_CASE = (
encoded_inputs['global_attention_mask'] + [-1] * difference
)
elif self.padding_side == "left":
SCREAMING_SNAKE_CASE = [-1] * difference + encoded_inputs[
'global_attention_mask'
]
else:
raise ValueError('Invalid padding strategy:' + str(self.padding_side ) )
return encoded_inputs
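# Hedged usage sketch (illustrative addition, not part of the original module): the `_pad`
# override above extends padding to `global_attention_mask`, filling with -1 because 0
# already means "local attention". Assuming the public `transformers` API and network
# access for the checkpoint:
if __name__ == "__main__":
    from transformers import LEDTokenizerFast

    tok = LEDTokenizerFast.from_pretrained('allenai/led-base-16384' )
    enc = tok(['short text', 'a somewhat longer piece of text'] )  # no padding yet
    # mark global attention on the first token of every sequence
    enc['global_attention_mask'] = [[1] + [0] * (len(ids ) - 1 ) for ids in enc['input_ids']]
    padded = tok.pad(enc , padding='longest' )
    print(padded['global_attention_mask'] )  # the shorter row is right-padded with -1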
| 673 | 1 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
a_ : int = logging.get_logger(__name__)
class UpperCamelCase ( SCREAMING_SNAKE_CASE ):
__UpperCamelCase =["pixel_values"]
def __init__( self : Union[str, Any] , snake_case__ : bool = True , snake_case__ : Dict[str, int] = None , snake_case__ : PILImageResampling = PILImageResampling.BICUBIC , snake_case__ : bool = True , snake_case__ : Union[int, float] = 1 / 2_5_5 , snake_case__ : bool = True , snake_case__ : Optional[Union[float, List[float]]] = None , snake_case__ : Optional[Union[float, List[float]]] = None , snake_case__ : bool = True , **snake_case__ : Union[str, Any] , ):
"""simple docstring"""
super().__init__(**snake_case__ )
SCREAMING_SNAKE_CASE = size if size is not None else {'height': 3_8_4, 'width': 3_8_4}
SCREAMING_SNAKE_CASE = get_size_dict(snake_case__ , default_to_square=snake_case__ )
SCREAMING_SNAKE_CASE = do_resize
SCREAMING_SNAKE_CASE = size
SCREAMING_SNAKE_CASE = resample
SCREAMING_SNAKE_CASE = do_rescale
SCREAMING_SNAKE_CASE = rescale_factor
SCREAMING_SNAKE_CASE = do_normalize
SCREAMING_SNAKE_CASE = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
SCREAMING_SNAKE_CASE = image_std if image_std is not None else OPENAI_CLIP_STD
SCREAMING_SNAKE_CASE = do_convert_rgb
def UpperCamelCase ( self : Dict , snake_case__ : np.ndarray , snake_case__ : Dict[str, int] , snake_case__ : PILImageResampling = PILImageResampling.BICUBIC , snake_case__ : Optional[Union[str, ChannelDimension]] = None , **snake_case__ : Dict , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = get_size_dict(snake_case__ , default_to_square=snake_case__ )
if "height" not in size or "width" not in size:
raise ValueError(F"""The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}""" )
SCREAMING_SNAKE_CASE = (size['height'], size['width'])
return resize(snake_case__ , size=snake_case__ , resample=snake_case__ , data_format=snake_case__ , **snake_case__ )
def UpperCamelCase ( self : str , snake_case__ : np.ndarray , snake_case__ : Union[int, float] , snake_case__ : Optional[Union[str, ChannelDimension]] = None , **snake_case__ : Union[str, Any] , ):
"""simple docstring"""
return rescale(snake_case__ , scale=snake_case__ , data_format=snake_case__ , **snake_case__ )
def UpperCamelCase ( self : Tuple , snake_case__ : np.ndarray , snake_case__ : Union[float, List[float]] , snake_case__ : Union[float, List[float]] , snake_case__ : Optional[Union[str, ChannelDimension]] = None , **snake_case__ : int , ):
"""simple docstring"""
return normalize(snake_case__ , mean=snake_case__ , std=snake_case__ , data_format=snake_case__ , **snake_case__ )
def UpperCamelCase ( self : List[Any] , snake_case__ : ImageInput , snake_case__ : Optional[bool] = None , snake_case__ : Optional[Dict[str, int]] = None , snake_case__ : PILImageResampling = None , snake_case__ : Optional[bool] = None , snake_case__ : Optional[float] = None , snake_case__ : Optional[bool] = None , snake_case__ : Optional[Union[float, List[float]]] = None , snake_case__ : Optional[Union[float, List[float]]] = None , snake_case__ : Optional[Union[str, TensorType]] = None , snake_case__ : bool = None , snake_case__ : ChannelDimension = ChannelDimension.FIRST , **snake_case__ : Any , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = do_resize if do_resize is not None else self.do_resize
SCREAMING_SNAKE_CASE = resample if resample is not None else self.resample
SCREAMING_SNAKE_CASE = do_rescale if do_rescale is not None else self.do_rescale
SCREAMING_SNAKE_CASE = rescale_factor if rescale_factor is not None else self.rescale_factor
SCREAMING_SNAKE_CASE = do_normalize if do_normalize is not None else self.do_normalize
SCREAMING_SNAKE_CASE = image_mean if image_mean is not None else self.image_mean
SCREAMING_SNAKE_CASE = image_std if image_std is not None else self.image_std
SCREAMING_SNAKE_CASE = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
SCREAMING_SNAKE_CASE = size if size is not None else self.size
SCREAMING_SNAKE_CASE = get_size_dict(snake_case__ , default_to_square=snake_case__ )
SCREAMING_SNAKE_CASE = make_list_of_images(snake_case__ )
if not valid_images(snake_case__ ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
        if do_resize and (size is None or resample is None):
raise ValueError('Size and resample must be specified if do_resize is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
SCREAMING_SNAKE_CASE = [convert_to_rgb(snake_case__ ) for image in images]
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE = [to_numpy_array(snake_case__ ) for image in images]
if do_resize:
SCREAMING_SNAKE_CASE = [self.resize(image=snake_case__ , size=snake_case__ , resample=snake_case__ ) for image in images]
if do_rescale:
SCREAMING_SNAKE_CASE = [self.rescale(image=snake_case__ , scale=snake_case__ ) for image in images]
if do_normalize:
SCREAMING_SNAKE_CASE = [self.normalize(image=snake_case__ , mean=snake_case__ , std=snake_case__ ) for image in images]
SCREAMING_SNAKE_CASE = [to_channel_dimension_format(snake_case__ , snake_case__ ) for image in images]
SCREAMING_SNAKE_CASE = BatchFeature(data={'pixel_values': images} , tensor_type=snake_case__ )
return encoded_outputs
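# Hedged usage sketch (illustrative addition, not part of the original module): the
# `preprocess` pipeline above is resize -> rescale -> normalize -> channels-first. The same
# steps can be reproduced with the stateless transforms, assuming a recent `transformers`
# release that exposes them:
if __name__ == "__main__":
    import numpy as np
    from transformers.image_transforms import normalize, rescale, resize, to_channel_dimension_format
    from transformers.image_utils import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, PILImageResampling

    image = np.random.randint(0 , 256 , (480, 640, 3) , dtype=np.uint8 )
    resized = resize(image , size=(384, 384) , resample=PILImageResampling.BICUBIC )
    rescaled = rescale(resized , scale=1 / 255 )
    normalized = normalize(rescaled , mean=OPENAI_CLIP_MEAN , std=OPENAI_CLIP_STD )
    pixel_values = to_channel_dimension_format(normalized , ChannelDimension.FIRST )
    print(pixel_values.shape )  # (3, 384, 384)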
| 673 |
#
# This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If the script hangs in `barrier` calls, you likely have network issues; you can try debugging them with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes with 4 GPUs per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def printflock ( *_UpperCamelCase : Optional[Any] ) -> None:
    '''Print under an exclusive lock on this file so multi-process output does not interleave.'''
    with open(__file__ , 'r' ) as fh:
        fcntl.flock(fh , fcntl.LOCK_EX )
        try:
            print(*_UpperCamelCase )
        finally:
            fcntl.flock(fh , fcntl.LOCK_UN )
a_ : int = int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(local_rank)
a_ : str = torch.device("cuda", local_rank)
a_ : Optional[int] = socket.gethostname()
a_ : Union[str, Any] = F"""[{hostname}-{local_rank}]"""
try:
# test distributed
dist.init_process_group("nccl")
dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
dist.barrier()
# test cuda is available and can allocate memory
torch.cuda.is_available()
torch.ones(1).cuda(local_rank)
# global rank
a_ : Dict = dist.get_rank()
a_ : Any = dist.get_world_size()
printflock(F"""{gpu} is OK (global rank: {rank}/{world_size})""")
dist.barrier()
if rank == 0:
printflock(F"""pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}""")
except Exception:
printflock(F"""{gpu} is broken""")
raise
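# Optional extra check (illustrative addition, a sketch rather than part of the original
# diagnostic): an all_gather verifies that every rank can see every other rank's tensor,
# which can catch asymmetric network failures that a single all_reduce may miss.
if dist.is_initialized():
    _dev = torch.device('cuda' , int(os.environ['LOCAL_RANK'] ) )
    _t = torch.ones(1 , device=_dev ) * dist.get_rank()
    _gathered = [torch.zeros_like(_t ) for _ in range(dist.get_world_size() )]
    dist.all_gather(_gathered , _t )
    printflock(F"""all_gather ok on rank {dist.get_rank()}: {[int(x.item() ) for x in _gathered]}""" )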
| 673 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ : int = logging.get_logger(__name__)
a_ : Any = {
"sayakpaul/vit-msn-base": "https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json",
# See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class UpperCamelCase ( SCREAMING_SNAKE_CASE ):
__UpperCamelCase ="vit_msn"
def __init__( self : int , snake_case__ : Any=7_6_8 , snake_case__ : List[Any]=1_2 , snake_case__ : Dict=1_2 , snake_case__ : Optional[int]=3_0_7_2 , snake_case__ : Optional[Any]="gelu" , snake_case__ : Optional[Any]=0.0 , snake_case__ : Dict=0.0 , snake_case__ : str=0.02 , snake_case__ : Optional[Any]=1E-06 , snake_case__ : Any=2_2_4 , snake_case__ : Union[str, Any]=1_6 , snake_case__ : Optional[Any]=3 , snake_case__ : Optional[Any]=True , **snake_case__ : Tuple , ):
"""simple docstring"""
super().__init__(**snake_case__ )
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = intermediate_size
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = hidden_dropout_prob
SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = layer_norm_eps
SCREAMING_SNAKE_CASE = image_size
SCREAMING_SNAKE_CASE = patch_size
SCREAMING_SNAKE_CASE = num_channels
SCREAMING_SNAKE_CASE = qkv_bias
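# Hedged usage sketch (illustrative addition): configuration objects are plain containers,
# so the class above can be exercised offline. Assuming a `transformers` release that ships
# ViT-MSN under the name `ViTMSNConfig`:
if __name__ == "__main__":
    from transformers import ViTMSNConfig

    config = ViTMSNConfig(hidden_size=384 , num_hidden_layers=6 , num_attention_heads=6 )
    print(config.model_type , config.hidden_size )  # vit_msn 384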
| 673 |
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
a_ : Optional[Any] = logging.get_logger(__name__)
a_ : Optional[Any] = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
a_ : Any = {
"vocab_file": {
"allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json"
},
"merges_file": {
"allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt"
},
}
a_ : Union[str, Any] = {"allegro/herbert-base-cased": 514}
a_ : List[Any] = {}
class UpperCamelCase ( SCREAMING_SNAKE_CASE ):
__UpperCamelCase =VOCAB_FILES_NAMES
__UpperCamelCase =PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase =PRETRAINED_INIT_CONFIGURATION
__UpperCamelCase =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase =HerbertTokenizer
def __init__( self : Tuple , snake_case__ : Optional[Any]=None , snake_case__ : int=None , snake_case__ : Optional[int]=None , snake_case__ : str="<s>" , snake_case__ : Tuple="<unk>" , snake_case__ : List[str]="<pad>" , snake_case__ : Tuple="<mask>" , snake_case__ : Dict="</s>" , **snake_case__ : List[str] , ):
"""simple docstring"""
super().__init__(
snake_case__ , snake_case__ , tokenizer_file=snake_case__ , cls_token=snake_case__ , unk_token=snake_case__ , pad_token=snake_case__ , mask_token=snake_case__ , sep_token=snake_case__ , **snake_case__ , )
def UpperCamelCase ( self : Union[str, Any] , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = [self.cls_token_id]
SCREAMING_SNAKE_CASE = [self.sep_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def UpperCamelCase ( self : Optional[Any] , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None , snake_case__ : bool = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=snake_case__ , token_ids_a=snake_case__ , already_has_special_tokens=snake_case__ )
if token_ids_a is None:
return [1] + ([0] * len(snake_case__ )) + [1]
return [1] + ([0] * len(snake_case__ )) + [1] + ([0] * len(snake_case__ )) + [1]
def UpperCamelCase ( self : Optional[Any] , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = [self.sep_token_id]
SCREAMING_SNAKE_CASE = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCamelCase ( self : Tuple , snake_case__ : str , snake_case__ : Optional[str] = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self._tokenizer.model.save(snake_case__ , name=snake_case__ )
return tuple(snake_case__ )
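# Hedged usage sketch (illustrative addition): per the methods above, a single sequence is
# wrapped as <s> ... </s> and a pair as <s> A </s> B </s>. Assuming network access for the
# checkpoint:
if __name__ == "__main__":
    from transformers import HerbertTokenizerFast

    tok = HerbertTokenizerFast.from_pretrained('allegro/herbert-base-cased' )
    ids = tok('Ala ma kota' , 'Kot ma Alę' )['input_ids']
    print(tok.convert_ids_to_tokens(ids ) )  # starts with <s>, sequences joined by </s>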
| 673 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a_ : Optional[int] = logging.get_logger(__name__)
a_ : Union[str, Any] = {
"facebook/xmod-base": "https://huggingface.co/facebook/xmod-base/resolve/main/config.json",
"facebook/xmod-large-prenorm": "https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json",
"facebook/xmod-base-13-125k": "https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json",
"facebook/xmod-base-30-125k": "https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json",
"facebook/xmod-base-30-195k": "https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json",
"facebook/xmod-base-60-125k": "https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json",
"facebook/xmod-base-60-265k": "https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json",
"facebook/xmod-base-75-125k": "https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json",
"facebook/xmod-base-75-269k": "https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json",
}
class UpperCamelCase ( SCREAMING_SNAKE_CASE ):
__UpperCamelCase ="xmod"
def __init__( self : str , snake_case__ : Any=3_0_5_2_2 , snake_case__ : Union[str, Any]=7_6_8 , snake_case__ : Tuple=1_2 , snake_case__ : List[Any]=1_2 , snake_case__ : Any=3_0_7_2 , snake_case__ : Union[str, Any]="gelu" , snake_case__ : List[str]=0.1 , snake_case__ : Optional[int]=0.1 , snake_case__ : Dict=5_1_2 , snake_case__ : int=2 , snake_case__ : str=0.02 , snake_case__ : List[Any]=1E-12 , snake_case__ : Tuple=1 , snake_case__ : List[str]=0 , snake_case__ : List[str]=2 , snake_case__ : Union[str, Any]="absolute" , snake_case__ : str=True , snake_case__ : Union[str, Any]=None , snake_case__ : int=False , snake_case__ : Any=2 , snake_case__ : List[str]=False , snake_case__ : Optional[int]=True , snake_case__ : Union[str, Any]=True , snake_case__ : List[str]=("en_XX",) , snake_case__ : List[Any]=None , **snake_case__ : Dict , ):
"""simple docstring"""
super().__init__(pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ , **snake_case__ )
SCREAMING_SNAKE_CASE = vocab_size
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = intermediate_size
SCREAMING_SNAKE_CASE = hidden_dropout_prob
SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE = max_position_embeddings
SCREAMING_SNAKE_CASE = type_vocab_size
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = layer_norm_eps
SCREAMING_SNAKE_CASE = position_embedding_type
SCREAMING_SNAKE_CASE = use_cache
SCREAMING_SNAKE_CASE = classifier_dropout
SCREAMING_SNAKE_CASE = pre_norm
SCREAMING_SNAKE_CASE = adapter_reduction_factor
SCREAMING_SNAKE_CASE = adapter_layer_norm
SCREAMING_SNAKE_CASE = adapter_reuse_layer_norm
SCREAMING_SNAKE_CASE = ln_before_adapter
SCREAMING_SNAKE_CASE = list(snake_case__ )
SCREAMING_SNAKE_CASE = default_language
class UpperCamelCase ( SCREAMING_SNAKE_CASE ):
@property
def UpperCamelCase ( self : str ):
"""simple docstring"""
if self.task == "multiple-choice":
SCREAMING_SNAKE_CASE = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
SCREAMING_SNAKE_CASE = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
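# Hedged usage sketch (illustrative addition): the `languages` tuple above decides which
# per-language adapter modules an X-MOD model allocates. Assuming a `transformers` release
# that ships X-MOD under the name `XmodConfig`:
if __name__ == "__main__":
    from transformers import XmodConfig

    config = XmodConfig(languages=('en_XX', 'de_DE') , default_language='en_XX' )
    print(config.languages , config.default_language )  # ['en_XX', 'de_DE'] en_XX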
| 673 |
def sum_of_digits( _UpperCamelCase : int ) -> int:
    '''Iteratively sum the decimal digits of the absolute value of n.'''
    n = abs(_UpperCamelCase )
    res = 0
    while n > 0:
        res += n % 10
        n //= 10
    return res
def sum_of_digits_recursion( _UpperCamelCase : int ) -> int:
    '''Recursively sum the decimal digits of the absolute value of n.'''
    n = abs(_UpperCamelCase )
    return n if n < 10 else n % 10 + sum_of_digits(n // 10 )
def sum_of_digits_compact( _UpperCamelCase : int ) -> int:
    '''Sum the decimal digits via the string representation of n.'''
    return sum(int(c ) for c in str(abs(_UpperCamelCase ) ) )
def benchmark( ) -> None:
    '''Benchmark the three implementations against each other.'''
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func : Callable , value : int ) -> None:
        call = f"""{func.__name__}({value})"""
        timing = timeit(f"""__main__.{call}""" , setup='import __main__' )
        print(f"""{call:56} = {func(value )} -- {timing:.4f} seconds""" )

    for value in (26_21_44, 11_25_89_99_06_84_26_24, 1_26_76_50_60_02_28_22_94_01_49_67_03_20_53_76):
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func , value )
        print()
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    benchmark()
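    # Quick agreement check across the three implementations (illustrative addition):
    assert sum_of_digits(12345 ) == sum_of_digits_recursion(12345 ) == sum_of_digits_compact(12345 ) == 15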
| 673 | 1 |
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
a_ : List[str] = version.parse(importlib_metadata.version("nltk"))
if NLTK_VERSION >= version.Version("3.6.4"):
from nltk import word_tokenize
a_ : Dict = "\\n@inproceedings{banarjee2005,\n title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n author = {Banerjee, Satanjeev and Lavie, Alon},\n booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n month = jun,\n year = {2005},\n address = {Ann Arbor, Michigan},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W05-0909},\n pages = {65--72},\n}\n"
a_ : str = "\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n"
a_ : int = "\nComputes METEOR score of translated segments against one or more references.\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n gamma: Relative weight assigned to fragmentation penalty. default: 0.5\nReturns:\n 'meteor': meteor score.\nExamples:\n\n >>> meteor = datasets.load_metric('meteor')\n >>> predictions = [\"It is a guide to action which ensures that the military always obeys the commands of the party\"]\n >>> references = [\"It is a guide to action that ensures that the military will forever heed Party commands\"]\n >>> results = meteor.compute(predictions=predictions, references=references)\n >>> print(round(results[\"meteor\"], 4))\n 0.6944\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCamelCase ( datasets.Metric ):
def UpperCamelCase ( self : str ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Value('string' , id='sequence' ),
} ) , codebase_urls=['https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py'] , reference_urls=[
'https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score',
'https://en.wikipedia.org/wiki/METEOR',
] , )
def UpperCamelCase ( self : Dict , snake_case__ : int ):
"""simple docstring"""
import nltk
nltk.download('wordnet' )
if NLTK_VERSION >= version.Version('3.6.5' ):
nltk.download('punkt' )
if NLTK_VERSION >= version.Version('3.6.6' ):
nltk.download('omw-1.4' )
def UpperCamelCase ( self : Union[str, Any] , snake_case__ : str , snake_case__ : List[Any] , snake_case__ : List[Any]=0.9 , snake_case__ : Optional[Any]=3 , snake_case__ : Any=0.5 ):
"""simple docstring"""
if NLTK_VERSION >= version.Version('3.6.5' ):
SCREAMING_SNAKE_CASE = [
meteor_score.single_meteor_score(
word_tokenize(snake_case__ ) , word_tokenize(snake_case__ ) , alpha=snake_case__ , beta=snake_case__ , gamma=snake_case__ )
for ref, pred in zip(snake_case__ , snake_case__ )
]
else:
SCREAMING_SNAKE_CASE = [
meteor_score.single_meteor_score(snake_case__ , snake_case__ , alpha=snake_case__ , beta=snake_case__ , gamma=snake_case__ )
for ref, pred in zip(snake_case__ , snake_case__ )
]
return {"meteor": np.mean(snake_case__ )}
| 673 |
# limitations under the License.
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
"pipelines_utils",
"0.22.0",
"Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.",
standard_warn=False,
stacklevel=3,
)
| 673 | 1 |
import re
from filelock import FileLock
try:
import nltk
a_ : List[str] = True
except (ImportError, ModuleNotFoundError):
a_ : Dict = False
if NLTK_AVAILABLE:
with FileLock(".lock") as lock:
nltk.download("punkt", quiet=True)
def __lowerCAmelCase ( _UpperCamelCase : str ) -> str:
    '''Split text into sentences, one per line (the ROUGE-Lsum convention).'''
    SCREAMING_SNAKE_CASE = re.sub('<n>' , '' , _UpperCamelCase )  # remove pegasus newline char and keep the result
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(SCREAMING_SNAKE_CASE ) )
| 673 |
import gc
import unittest
import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
ClapTextConfig,
ClapTextModelWithProjection,
RobertaTokenizer,
SpeechTaHifiGan,
SpeechTaHifiGanConfig,
)
from diffusers import (
AudioLDMPipeline,
AutoencoderKL,
DDIMScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class UpperCamelCase ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
__UpperCamelCase =AudioLDMPipeline
__UpperCamelCase =TEXT_TO_AUDIO_PARAMS
__UpperCamelCase =TEXT_TO_AUDIO_BATCH_PARAMS
__UpperCamelCase =frozenset(
[
"num_inference_steps",
"num_waveforms_per_prompt",
"generator",
"latents",
"output_type",
"return_dict",
"callback",
"callback_steps",
] )
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=(3_2, 6_4) , class_embed_type='simple_projection' , projection_class_embeddings_input_dim=3_2 , class_embeddings_concat=snake_case__ , )
SCREAMING_SNAKE_CASE = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=snake_case__ , set_alpha_to_one=snake_case__ , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=1 , out_channels=1 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = ClapTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , projection_dim=3_2 , )
SCREAMING_SNAKE_CASE = ClapTextModelWithProjection(snake_case__ )
SCREAMING_SNAKE_CASE = RobertaTokenizer.from_pretrained('hf-internal-testing/tiny-random-roberta' , model_max_length=7_7 )
SCREAMING_SNAKE_CASE = SpeechTaHifiGanConfig(
model_in_dim=8 , sampling_rate=1_6_0_0_0 , upsample_initial_channel=1_6 , upsample_rates=[2, 2] , upsample_kernel_sizes=[4, 4] , resblock_kernel_sizes=[3, 7] , resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]] , normalize_before=snake_case__ , )
SCREAMING_SNAKE_CASE = SpeechTaHifiGan(snake_case__ )
SCREAMING_SNAKE_CASE = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'vocoder': vocoder,
}
return components
def UpperCamelCase ( self : Optional[int] , snake_case__ : int , snake_case__ : int=0 ):
"""simple docstring"""
if str(snake_case__ ).startswith('mps' ):
SCREAMING_SNAKE_CASE = torch.manual_seed(snake_case__ )
else:
SCREAMING_SNAKE_CASE = torch.Generator(device=snake_case__ ).manual_seed(snake_case__ )
SCREAMING_SNAKE_CASE = {
'prompt': 'A hammer hitting a wooden surface',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
}
return inputs
def UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = 'cpu' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE = self.get_dummy_components()
SCREAMING_SNAKE_CASE = AudioLDMPipeline(**snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
audioldm_pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = self.get_dummy_inputs(snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe(**snake_case__ )
SCREAMING_SNAKE_CASE = output.audios[0]
assert audio.ndim == 1
assert len(snake_case__ ) == 2_5_6
SCREAMING_SNAKE_CASE = audio[:1_0]
SCREAMING_SNAKE_CASE = np.array(
[-0.0_050, 0.0_050, -0.0_060, 0.0_033, -0.0_026, 0.0_033, -0.0_027, 0.0_033, -0.0_028, 0.0_033] )
assert np.abs(audio_slice - expected_slice ).max() < 1E-2
def UpperCamelCase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.get_dummy_components()
SCREAMING_SNAKE_CASE = AudioLDMPipeline(**snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
audioldm_pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = self.get_dummy_inputs(snake_case__ )
SCREAMING_SNAKE_CASE = 3 * [inputs['prompt']]
# forward
SCREAMING_SNAKE_CASE = audioldm_pipe(**snake_case__ )
SCREAMING_SNAKE_CASE = output.audios[0]
SCREAMING_SNAKE_CASE = self.get_dummy_inputs(snake_case__ )
SCREAMING_SNAKE_CASE = 3 * [inputs.pop('prompt' )]
SCREAMING_SNAKE_CASE = audioldm_pipe.tokenizer(
snake_case__ , padding='max_length' , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=snake_case__ , return_tensors='pt' , )
SCREAMING_SNAKE_CASE = text_inputs['input_ids'].to(snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.text_encoder(
snake_case__ , )
SCREAMING_SNAKE_CASE = prompt_embeds.text_embeds
# additional L_2 normalization over each hidden-state
SCREAMING_SNAKE_CASE = F.normalize(snake_case__ , dim=-1 )
SCREAMING_SNAKE_CASE = prompt_embeds
# forward
SCREAMING_SNAKE_CASE = audioldm_pipe(**snake_case__ )
SCREAMING_SNAKE_CASE = output.audios[0]
assert np.abs(audio_a - audio_a ).max() < 1E-2
def UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.get_dummy_components()
SCREAMING_SNAKE_CASE = AudioLDMPipeline(**snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
audioldm_pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = self.get_dummy_inputs(snake_case__ )
SCREAMING_SNAKE_CASE = 3 * ['this is a negative prompt']
SCREAMING_SNAKE_CASE = negative_prompt
SCREAMING_SNAKE_CASE = 3 * [inputs['prompt']]
# forward
SCREAMING_SNAKE_CASE = audioldm_pipe(**snake_case__ )
SCREAMING_SNAKE_CASE = output.audios[0]
SCREAMING_SNAKE_CASE = self.get_dummy_inputs(snake_case__ )
SCREAMING_SNAKE_CASE = 3 * [inputs.pop('prompt' )]
SCREAMING_SNAKE_CASE = []
for p in [prompt, negative_prompt]:
SCREAMING_SNAKE_CASE = audioldm_pipe.tokenizer(
snake_case__ , padding='max_length' , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=snake_case__ , return_tensors='pt' , )
SCREAMING_SNAKE_CASE = text_inputs['input_ids'].to(snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.text_encoder(
snake_case__ , )
SCREAMING_SNAKE_CASE = text_embeds.text_embeds
# additional L_2 normalization over each hidden-state
SCREAMING_SNAKE_CASE = F.normalize(snake_case__ , dim=-1 )
embeds.append(snake_case__ )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = embeds
# forward
SCREAMING_SNAKE_CASE = audioldm_pipe(**snake_case__ )
SCREAMING_SNAKE_CASE = output.audios[0]
assert np.abs(audio_a - audio_a ).max() < 1E-2
def UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = 'cpu' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE = self.get_dummy_components()
SCREAMING_SNAKE_CASE = PNDMScheduler(skip_prk_steps=snake_case__ )
SCREAMING_SNAKE_CASE = AudioLDMPipeline(**snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
audioldm_pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = self.get_dummy_inputs(snake_case__ )
SCREAMING_SNAKE_CASE = 'egg cracking'
SCREAMING_SNAKE_CASE = audioldm_pipe(**snake_case__ , negative_prompt=snake_case__ )
SCREAMING_SNAKE_CASE = output.audios[0]
assert audio.ndim == 1
assert len(snake_case__ ) == 2_5_6
SCREAMING_SNAKE_CASE = audio[:1_0]
SCREAMING_SNAKE_CASE = np.array(
[-0.0_051, 0.0_050, -0.0_060, 0.0_034, -0.0_026, 0.0_033, -0.0_027, 0.0_033, -0.0_028, 0.0_032] )
assert np.abs(audio_slice - expected_slice ).max() < 1E-2
def UpperCamelCase ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = 'cpu' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE = self.get_dummy_components()
SCREAMING_SNAKE_CASE = PNDMScheduler(skip_prk_steps=snake_case__ )
SCREAMING_SNAKE_CASE = AudioLDMPipeline(**snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
audioldm_pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = 'A hammer hitting a wooden surface'
# test num_waveforms_per_prompt=1 (default)
SCREAMING_SNAKE_CASE = audioldm_pipe(snake_case__ , num_inference_steps=2 ).audios
assert audios.shape == (1, 2_5_6)
# test num_waveforms_per_prompt=1 (default) for batch of prompts
SCREAMING_SNAKE_CASE = 2
SCREAMING_SNAKE_CASE = audioldm_pipe([prompt] * batch_size , num_inference_steps=2 ).audios
assert audios.shape == (batch_size, 2_5_6)
# test num_waveforms_per_prompt for single prompt
SCREAMING_SNAKE_CASE = 2
SCREAMING_SNAKE_CASE = audioldm_pipe(snake_case__ , num_inference_steps=2 , num_waveforms_per_prompt=snake_case__ ).audios
assert audios.shape == (num_waveforms_per_prompt, 2_5_6)
# test num_waveforms_per_prompt for batch of prompts
SCREAMING_SNAKE_CASE = 2
SCREAMING_SNAKE_CASE = audioldm_pipe(
[prompt] * batch_size , num_inference_steps=2 , num_waveforms_per_prompt=snake_case__ ).audios
assert audios.shape == (batch_size * num_waveforms_per_prompt, 2_5_6)
def UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = 'cpu' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE = self.get_dummy_components()
SCREAMING_SNAKE_CASE = AudioLDMPipeline(**snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
audioldm_pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.vocoder.config.sampling_rate
SCREAMING_SNAKE_CASE = self.get_dummy_inputs(snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe(audio_length_in_s=0.016 , **snake_case__ )
SCREAMING_SNAKE_CASE = output.audios[0]
assert audio.ndim == 1
assert len(snake_case__ ) / vocoder_sampling_rate == 0.016
SCREAMING_SNAKE_CASE = audioldm_pipe(audio_length_in_s=0.032 , **snake_case__ )
SCREAMING_SNAKE_CASE = output.audios[0]
assert audio.ndim == 1
assert len(snake_case__ ) / vocoder_sampling_rate == 0.032
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.get_dummy_components()
SCREAMING_SNAKE_CASE = AudioLDMPipeline(**snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
audioldm_pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = ['hey']
SCREAMING_SNAKE_CASE = audioldm_pipe(snake_case__ , num_inference_steps=1 )
SCREAMING_SNAKE_CASE = output.audios.shape
assert audio_shape == (1, 2_5_6)
SCREAMING_SNAKE_CASE = audioldm_pipe.vocoder.config
config.model_in_dim *= 2
SCREAMING_SNAKE_CASE = SpeechTaHifiGan(snake_case__ ).to(snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe(snake_case__ , num_inference_steps=1 )
SCREAMING_SNAKE_CASE = output.audios.shape
# waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
assert audio_shape == (1, 2_5_6)
def UpperCamelCase ( self : Tuple ):
"""simple docstring"""
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=snake_case__ )
def UpperCamelCase ( self : int ):
"""simple docstring"""
self._test_inference_batch_single_identical(test_mean_pixel_difference=snake_case__ )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def UpperCamelCase ( self : Dict ):
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=snake_case__ )
@slow
class UpperCamelCase ( unittest.TestCase ):
def UpperCamelCase ( self : Any ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase ( self : int , snake_case__ : int , snake_case__ : Tuple="cpu" , snake_case__ : List[str]=torch.floataa , snake_case__ : Optional[Any]=0 ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = torch.Generator(device=snake_case__ ).manual_seed(snake_case__ )
SCREAMING_SNAKE_CASE = np.random.RandomState(snake_case__ ).standard_normal((1, 8, 1_2_8, 1_6) )
SCREAMING_SNAKE_CASE = torch.from_numpy(snake_case__ ).to(device=snake_case__ , dtype=snake_case__ )
SCREAMING_SNAKE_CASE = {
'prompt': 'A hammer hitting a wooden surface',
'latents': latents,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 2.5,
}
return inputs
def UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = AudioLDMPipeline.from_pretrained('cvssp/audioldm' )
SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
audioldm_pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = self.get_inputs(snake_case__ )
SCREAMING_SNAKE_CASE = 2_5
SCREAMING_SNAKE_CASE = audioldm_pipe(**snake_case__ ).audios[0]
assert audio.ndim == 1
assert len(snake_case__ ) == 8_1_9_2_0
SCREAMING_SNAKE_CASE = audio[7_7_2_3_0:7_7_2_4_0]
SCREAMING_SNAKE_CASE = np.array(
[-0.4_884, -0.4_607, 0.0_023, 0.5_007, 0.5_896, 0.5_151, 0.3_813, -0.0_208, -0.3_687, -0.4_315] )
SCREAMING_SNAKE_CASE = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 1E-2
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = AudioLDMPipeline.from_pretrained('cvssp/audioldm' )
SCREAMING_SNAKE_CASE = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config )
SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
audioldm_pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = self.get_inputs(snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe(**snake_case__ ).audios[0]
assert audio.ndim == 1
assert len(snake_case__ ) == 8_1_9_2_0
SCREAMING_SNAKE_CASE = audio[2_7_7_8_0:2_7_7_9_0]
SCREAMING_SNAKE_CASE = np.array([-0.2_131, -0.0_873, -0.0_124, -0.0_189, 0.0_569, 0.1_373, 0.1_883, 0.2_886, 0.3_297, 0.2_212] )
SCREAMING_SNAKE_CASE = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 3E-2
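# Hedged usage sketch (illustrative addition, not part of the original test module; requires
# network access, a `diffusers` release that ships AudioLDM, and ideally a GPU):
if __name__ == "__main__":
    from diffusers import AudioLDMPipeline

    pipe = AudioLDMPipeline.from_pretrained('cvssp/audioldm' )
    audio = pipe('A hammer hitting a wooden surface' , num_inference_steps=1_0 ).audios[0]
    print(audio.shape )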
| 673 | 1 |
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class UpperCamelCase ( yaml.SafeLoader ):
def UpperCamelCase ( self : List[str] , snake_case__ : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = [self.constructed_objects[key_node] for key_node, _ in node.value]
SCREAMING_SNAKE_CASE = [tuple(snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else key for key in keys]
SCREAMING_SNAKE_CASE = Counter(snake_case__ )
SCREAMING_SNAKE_CASE = [key for key in counter if counter[key] > 1]
if duplicate_keys:
raise TypeError(F"""Got duplicate yaml keys: {duplicate_keys}""" )
def UpperCamelCase ( self : str , snake_case__ : str , snake_case__ : Any=False ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = super().construct_mapping(snake_case__ , deep=snake_case__ )
self._check_no_duplicates_on_constructed_node(snake_case__ )
return mapping
def __lowerCAmelCase ( _UpperCamelCase : str ) -> Tuple[Optional[str], str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = list(readme_content.splitlines() )
if full_content and full_content[0] == "---" and "---" in full_content[1:]:
SCREAMING_SNAKE_CASE = full_content[1:].index('---' ) + 1
SCREAMING_SNAKE_CASE = '\n'.join(full_content[1:sep_idx] )
return yamlblock, "\n".join(full_content[sep_idx + 1 :] )
return None, "\n".join(_UpperCamelCase )
class UpperCamelCase ( SCREAMING_SNAKE_CASE ):
# class attributes
__UpperCamelCase ={"train_eval_index"} # train-eval-index in the YAML metadata
@classmethod
def UpperCamelCase ( cls : Union[str, Any] , snake_case__ : Path ):
"""simple docstring"""
with open(snake_case__ , encoding='utf-8' ) as readme_file:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = _split_yaml_from_readme(readme_file.read() )
if yaml_string is not None:
return cls.from_yaml_string(snake_case__ )
else:
return cls()
def UpperCamelCase ( self : Union[str, Any] , snake_case__ : Path ):
"""simple docstring"""
if path.exists():
with open(snake_case__ , encoding='utf-8' ) as readme_file:
SCREAMING_SNAKE_CASE = readme_file.read()
else:
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = self._to_readme(snake_case__ )
with open(snake_case__ , 'w' , encoding='utf-8' ) as readme_file:
readme_file.write(snake_case__ )
def UpperCamelCase ( self : Optional[Any] , snake_case__ : Optional[str] = None ):
"""simple docstring"""
if readme_content is not None:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = _split_yaml_from_readme(snake_case__ )
SCREAMING_SNAKE_CASE = '---\n' + self.to_yaml_string() + '---\n' + content
else:
SCREAMING_SNAKE_CASE = '---\n' + self.to_yaml_string() + '---\n'
return full_content
@classmethod
def UpperCamelCase ( cls : Dict , snake_case__ : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = yaml.load(snake_case__ , Loader=_NoDuplicateSafeLoader ) or {}
# Convert the YAML keys to DatasetMetadata fields
SCREAMING_SNAKE_CASE = {
(key.replace('-' , '_' ) if key.replace('-' , '_' ) in cls._FIELDS_WITH_DASHES else key): value
for key, value in metadata_dict.items()
}
return cls(**snake_case__ )
def UpperCamelCase ( self : str ):
"""simple docstring"""
return yaml.safe_dump(
{
(key.replace('_' , '-' ) if key in self._FIELDS_WITH_DASHES else key): value
for key, value in self.items()
} , sort_keys=snake_case__ , allow_unicode=snake_case__ , encoding='utf-8' , ).decode('utf-8' )
a_ : int = {
"image-classification": [],
"translation": [],
"image-segmentation": [],
"fill-mask": [],
"automatic-speech-recognition": [],
"token-classification": [],
"sentence-similarity": [],
"audio-classification": [],
"question-answering": [],
"summarization": [],
"zero-shot-classification": [],
"table-to-text": [],
"feature-extraction": [],
"other": [],
"multiple-choice": [],
"text-classification": [],
"text-to-image": [],
"text2text-generation": [],
"zero-shot-image-classification": [],
"tabular-classification": [],
"tabular-regression": [],
"image-to-image": [],
"tabular-to-text": [],
"unconditional-image-generation": [],
"text-retrieval": [],
"text-to-speech": [],
"object-detection": [],
"audio-to-audio": [],
"text-generation": [],
"conversational": [],
"table-question-answering": [],
"visual-question-answering": [],
"image-to-text": [],
"reinforcement-learning": [],
"voice-activity-detection": [],
"time-series-forecasting": [],
"document-question-answering": [],
}
if __name__ == "__main__":
from argparse import ArgumentParser
a_ : int = ArgumentParser(usage="Validate the yaml metadata block of a README.md file.")
ap.add_argument("readme_filepath")
a_ : Optional[int] = ap.parse_args()
a_ : List[str] = Path(args.readme_filepath)
a_ : str = DatasetMetadata.from_readme(readme_filepath)
print(dataset_metadata)
dataset_metadata.to_readme(readme_filepath)
| 673 |
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class UpperCamelCase :
def __init__( self : Dict , snake_case__ : str , snake_case__ : str=1_3 , snake_case__ : Tuple=7 , snake_case__ : Tuple=True , snake_case__ : Tuple=True , snake_case__ : List[str]=False , snake_case__ : Any=True , snake_case__ : Union[str, Any]=9_9 , snake_case__ : Dict=3_2 , snake_case__ : Optional[Any]=5 , snake_case__ : Optional[Any]=4 , snake_case__ : Union[str, Any]=3_7 , snake_case__ : Tuple="gelu" , snake_case__ : Dict=0.1 , snake_case__ : Any=0.1 , snake_case__ : int=5_1_2 , snake_case__ : Dict=1_6 , snake_case__ : str=2 , snake_case__ : Any=0.02 , snake_case__ : List[str]=3 , snake_case__ : int=4 , snake_case__ : List[str]=None , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = parent
SCREAMING_SNAKE_CASE = batch_size
SCREAMING_SNAKE_CASE = seq_length
SCREAMING_SNAKE_CASE = is_training
SCREAMING_SNAKE_CASE = use_input_mask
SCREAMING_SNAKE_CASE = use_token_type_ids
SCREAMING_SNAKE_CASE = use_labels
SCREAMING_SNAKE_CASE = vocab_size
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = intermediate_size
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = hidden_dropout_prob
SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE = max_position_embeddings
SCREAMING_SNAKE_CASE = type_vocab_size
SCREAMING_SNAKE_CASE = type_sequence_label_size
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = num_labels
SCREAMING_SNAKE_CASE = num_choices
SCREAMING_SNAKE_CASE = scope
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
if self.use_labels:
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_choices )
SCREAMING_SNAKE_CASE = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase ( self : Dict ):
"""simple docstring"""
return OpenLlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=snake_case__ , initializer_range=self.initializer_range , use_stable_embedding=snake_case__ , )
def UpperCamelCase ( self : int , snake_case__ : Tuple , snake_case__ : str , snake_case__ : Optional[Any] , snake_case__ : Union[str, Any] , snake_case__ : List[str] , snake_case__ : Any , snake_case__ : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = OpenLlamaModel(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ )
SCREAMING_SNAKE_CASE = model(snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase ( self : Optional[int] , snake_case__ : Dict , snake_case__ : List[Any] , snake_case__ : str , snake_case__ : str , snake_case__ : int , snake_case__ : Union[str, Any] , snake_case__ : Optional[int] , snake_case__ : List[Any] , snake_case__ : str , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = OpenLlamaModel(snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(
snake_case__ , attention_mask=snake_case__ , encoder_hidden_states=snake_case__ , encoder_attention_mask=snake_case__ , )
SCREAMING_SNAKE_CASE = model(
snake_case__ , attention_mask=snake_case__ , encoder_hidden_states=snake_case__ , )
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase ( self : Dict , snake_case__ : Union[str, Any] , snake_case__ : int , snake_case__ : int , snake_case__ : List[str] , snake_case__ : Optional[Any] , snake_case__ : Optional[Any] , snake_case__ : Optional[int] , snake_case__ : Optional[int] , snake_case__ : Tuple , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = OpenLlamaForCausalLM(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase ( self : Optional[int] , snake_case__ : Union[str, Any] , snake_case__ : List[Any] , snake_case__ : Any , snake_case__ : int , snake_case__ : Any , snake_case__ : Union[str, Any] , snake_case__ : int , snake_case__ : str , snake_case__ : Union[str, Any] , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = OpenLlamaForCausalLM(config=snake_case__ )
model.to(snake_case__ )
model.eval()
# first forward pass
SCREAMING_SNAKE_CASE = model(
snake_case__ , attention_mask=snake_case__ , encoder_hidden_states=snake_case__ , encoder_attention_mask=snake_case__ , use_cache=snake_case__ , )
SCREAMING_SNAKE_CASE = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
SCREAMING_SNAKE_CASE = ids_tensor((self.batch_size, 3) , config.vocab_size )
SCREAMING_SNAKE_CASE = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
SCREAMING_SNAKE_CASE = torch.cat([input_ids, next_tokens] , dim=-1 )
SCREAMING_SNAKE_CASE = torch.cat([input_mask, next_mask] , dim=-1 )
SCREAMING_SNAKE_CASE = model(
snake_case__ , attention_mask=snake_case__ , encoder_hidden_states=snake_case__ , encoder_attention_mask=snake_case__ , output_hidden_states=snake_case__ , )['hidden_states'][0]
SCREAMING_SNAKE_CASE = model(
snake_case__ , attention_mask=snake_case__ , encoder_hidden_states=snake_case__ , encoder_attention_mask=snake_case__ , past_key_values=snake_case__ , output_hidden_states=snake_case__ , )['hidden_states'][0]
# select random slice
SCREAMING_SNAKE_CASE = ids_tensor((1,) , output_from_past.shape[-1] ).item()
SCREAMING_SNAKE_CASE = output_from_no_past[:, -3:, random_slice_idx].detach()
SCREAMING_SNAKE_CASE = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(snake_case__ , snake_case__ , atol=1E-3 ) )
def UpperCamelCase ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
        SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = config_and_inputs
SCREAMING_SNAKE_CASE = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class UpperCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ):
__UpperCamelCase =(
(OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
)
__UpperCamelCase =(OpenLlamaForCausalLM,) if is_torch_available() else ()
__UpperCamelCase =(
{
"feature-extraction": OpenLlamaModel,
"text-classification": OpenLlamaForSequenceClassification,
"text-generation": OpenLlamaForCausalLM,
"zero-shot": OpenLlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
__UpperCamelCase =False
__UpperCamelCase =False
def UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = OpenLlamaModelTester(self )
SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=snake_case__ , hidden_size=3_7 )
def UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCamelCase ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case__ )
def UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
SCREAMING_SNAKE_CASE = type
self.model_tester.create_and_check_model(*snake_case__ )
    def test_open_llama_sequence_classification_model(self):
        """simple docstring"""
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_open_llama_sequence_classification_model_for_single_label(self):
        """simple docstring"""
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_open_llama_sequence_classification_model_for_multi_label(self):
        """simple docstring"""
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    @unittest.skip("Open-Llama buffers include complex numbers, which breaks this test")
    def test_save_load_fast_init_from_base(self):
        """simple docstring"""
        pass
@parameterized.expand([('linear',), ('dynamic',)] )
    def test_model_rope_scaling(self, scaling_type):
        """simple docstring"""
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = OpenLlamaModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = OpenLlamaModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))
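# A standalone sketch (outside transformers) of why the `rope_scaling` configs tested
# above behave this way: linear scaling divides every position index by the factor
# before the rotary angles are computed, so even short inputs change, while dynamic
# NTK scaling only kicks in past the original maximum length. The constants below are
# illustrative, not the model's.
if __name__ == "__main__":
    import numpy as np

    def rope_angles(positions, dim=8, base=10000.0, linear_factor=1.0):
        # one inverse frequency per pair of channels
        inv_freq = 1.0 / (base ** (np.arange(0, dim, 2) / dim))
        scaled = np.asarray(positions, dtype=np.float64) / linear_factor
        return np.outer(scaled, inv_freq)

    same = np.allclose(rope_angles(range(4)), rope_angles(range(4), linear_factor=10.0))
    print(same)  # False - linear scaling already alters short-input angles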
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
DataClass = NewType("DataClass", Any)
DataClassType = NewType("DataClassType", Any)


def string_to_bool(v):
    '''simple docstring'''
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise ArgumentTypeError(
            f"Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive)."
        )


def make_choice_type_function(choices: list) -> Callable[[str], Any]:
    '''simple docstring'''
    str_to_choice = {str(choice): choice for choice in choices}
    return lambda arg: str_to_choice.get(arg, arg)


def HfArg(
    *,
    aliases: Union[str, List[str]] = None,
    help: str = None,
    default: Any = dataclasses.MISSING,
    default_factory: Callable[[], Any] = dataclasses.MISSING,
    metadata: dict = None,
    **kwargs,
) -> dataclasses.Field:
    '''simple docstring'''
    if metadata is None:
        # Important, don't use as default param in function signature because dict is mutable and shared across function calls
        metadata = {}
    if aliases is not None:
        metadata["aliases"] = aliases
    if help is not None:
        metadata["help"] = help

    return dataclasses.field(metadata=metadata, default=default, default_factory=default_factory, **kwargs)
class HfArgumentParser(ArgumentParser):
    dataclass_types: Iterable[DataClassType]

    def __init__(self, dataclass_types: Union[DataClassType, Iterable[DataClassType]], **kwargs):
        """simple docstring"""
        # To make the default appear when using --help
        if "formatter_class" not in kwargs:
            kwargs["formatter_class"] = ArgumentDefaultsHelpFormatter
        super().__init__(**kwargs)
        if dataclasses.is_dataclass(dataclass_types):
            dataclass_types = [dataclass_types]
        self.dataclass_types = list(dataclass_types)
        for dtype in self.dataclass_types:
            self._add_dataclass_arguments(dtype)
    @staticmethod
    def _parse_dataclass_field(parser: ArgumentParser, field: dataclasses.Field):
        """simple docstring"""
        field_name = f"--{field.name}"
        kwargs = field.metadata.copy()
        # field.metadata is not used at all by Data Classes,
        # it is provided as a third-party extension mechanism.
        if isinstance(field.type, str):
            raise RuntimeError(
                "Unresolved type detected, which should have been done with the help of "
                "`typing.get_type_hints` method by default"
            )

        aliases = kwargs.pop("aliases", [])
        if isinstance(aliases, str):
            aliases = [aliases]

        origin_type = getattr(field.type, "__origin__", field.type)
        if origin_type is Union or (hasattr(types, "UnionType") and isinstance(field.type, types.UnionType)):
            if str not in field.type.__args__ and (
                len(field.type.__args__) != 2 or type(None) not in field.type.__args__
            ):
                raise ValueError(
                    "Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because"
                    " the argument parser only supports one type per argument."
                    f" Problem encountered in field '{field.name}'."
                )
            if type(None) not in field.type.__args__:
                # filter `str` in Union
                field.type = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
                origin_type = getattr(field.type, "__origin__", field.type)
            elif bool not in field.type.__args__:
                # filter `NoneType` in Union (except for `Union[bool, NoneType]`)
                field.type = (
                    field.type.__args__[0] if isinstance(None, field.type.__args__[1]) else field.type.__args__[1]
                )
                origin_type = getattr(field.type, "__origin__", field.type)

        # A variable to store kwargs for a boolean field, if needed
        # so that we can init a `no_*` complement argument (see below)
        bool_kwargs = {}
        if origin_type is Literal or (isinstance(field.type, type) and issubclass(field.type, Enum)):
            if origin_type is Literal:
                kwargs["choices"] = field.type.__args__
            else:
                kwargs["choices"] = [x.value for x in field.type]

            kwargs["type"] = make_choice_type_function(kwargs["choices"])

            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            else:
                kwargs["required"] = True
        elif field.type is bool or field.type == Optional[bool]:
            # Copy the current kwargs to use to instantiate a `no_*` complement argument below.
            # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
            bool_kwargs = copy(kwargs)

            # Hack because type=bool in argparse does not behave as we want.
            kwargs["type"] = string_to_bool
            if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
                # Default value is False if we have no default when of type bool.
                default = False if field.default is dataclasses.MISSING else field.default
                # This is the value that will get picked if we don't include --field_name in any way
                kwargs["default"] = default
                # This tells argparse we accept 0 or 1 value after --field_name
                kwargs["nargs"] = "?"
                # This is the value that will get picked if we do --field_name (without value)
                kwargs["const"] = True
        elif isclass(origin_type) and issubclass(origin_type, list):
            kwargs["type"] = field.type.__args__[0]
            kwargs["nargs"] = "+"
            if field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            elif field.default is dataclasses.MISSING:
                kwargs["required"] = True
        else:
            kwargs["type"] = field.type
            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            elif field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            else:
                kwargs["required"] = True
        parser.add_argument(field_name, *aliases, **kwargs)

        # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
        # Order is important for arguments with the same destination!
        # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
        # here and we do not need those changes/additional keys.
        if field.default is True and (field.type is bool or field.type == Optional[bool]):
            bool_kwargs["default"] = False
            parser.add_argument(f"--no_{field.name}", action="store_false", dest=field.name, **bool_kwargs)
    def _add_dataclass_arguments(self, dtype: DataClassType):
        """simple docstring"""
        if hasattr(dtype, "_argument_group_name"):
            parser = self.add_argument_group(dtype._argument_group_name)
        else:
            parser = self

        try:
            type_hints: Dict[str, type] = get_type_hints(dtype)
        except NameError:
            raise RuntimeError(
                f"Type resolution failed for {dtype}. Try declaring the class in global scope or "
                "removing line of `from __future__ import annotations` which opts in Postponed "
                "Evaluation of Annotations (PEP 563)"
            )
        except TypeError as ex:
            # Remove this block when we drop Python 3.9 support
            if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(ex):
                python_version = ".".join(map(str, sys.version_info[:3]))
                raise RuntimeError(
                    f"Type resolution failed for {dtype} on Python {python_version}. Try removing "
                    "line of `from __future__ import annotations` which opts in union types as "
                    "`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To "
                    "support Python versions that lower than 3.10, you need to use "
                    "`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of "
                    "`X | None`."
                ) from ex
            raise

        for field in dataclasses.fields(dtype):
            if not field.init:
                continue
            field.type = type_hints[field.name]
            self._parse_dataclass_field(parser, field)
    def parse_args_into_dataclasses(
        self,
        args=None,
        return_remaining_strings=False,
        look_for_args_file=True,
        args_filename=None,
        args_file_flag=None,
    ) -> Tuple[DataClass, ...]:
        """simple docstring"""
        if args_file_flag or args_filename or (look_for_args_file and len(sys.argv)):
            args_files = []

            if args_filename:
                args_files.append(Path(args_filename))
            elif look_for_args_file and len(sys.argv):
                args_files.append(Path(sys.argv[0]).with_suffix(".args"))

            # args files specified via command line flag should overwrite default args files so we add them last
            if args_file_flag:
                # Create special parser just to extract the args_file_flag values
                args_file_parser = ArgumentParser()
                args_file_parser.add_argument(args_file_flag, type=str, action="append")

                # Use only remaining args for further parsing (remove the args_file_flag)
                cfg, args = args_file_parser.parse_known_args(args=args)
                cmd_args_file_paths = vars(cfg).get(args_file_flag.lstrip("-"), None)

                if cmd_args_file_paths:
                    args_files.extend([Path(p) for p in cmd_args_file_paths])

            file_args = []
            for args_file in args_files:
                if args_file.exists():
                    file_args += args_file.read_text().split()

            # in case of duplicate arguments the last one has precedence
            # args specified via the command line should overwrite args from files, so we add them last
            args = file_args + args if args is not None else file_args + sys.argv[1:]

        namespace, remaining_args = self.parse_known_args(args=args)
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in vars(namespace).items() if k in keys}
            for k in keys:
                delattr(namespace, k)
            obj = dtype(**inputs)
            outputs.append(obj)
        if len(namespace.__dict__) > 0:
            # additional namespace.
            outputs.append(namespace)
        if return_remaining_strings:
            return (*outputs, remaining_args)
        else:
            if remaining_args:
                raise ValueError(f"Some specified arguments are not used by the HfArgumentParser: {remaining_args}")

            return (*outputs,)
    def parse_dict(self, args: Dict[str, Any], allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        """simple docstring"""
        unused_keys = set(args.keys())
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in args.items() if k in keys}
            unused_keys.difference_update(inputs.keys())
            obj = dtype(**inputs)
            outputs.append(obj)
        if not allow_extra_keys and unused_keys:
            raise ValueError(f"Some keys are not used by the HfArgumentParser: {sorted(unused_keys)}")
        return tuple(outputs)

    def parse_json_file(self, json_file: str, allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        """simple docstring"""
        with open(Path(json_file), encoding="utf-8") as open_json_file:
            data = json.loads(open_json_file.read())
        outputs = self.parse_dict(data, allow_extra_keys=allow_extra_keys)
        return tuple(outputs)

    def parse_yaml_file(self, yaml_file: str, allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        """simple docstring"""
        outputs = self.parse_dict(yaml.safe_load(Path(yaml_file).read_text()), allow_extra_keys=allow_extra_keys)
        return tuple(outputs)
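# A short usage sketch for the parser above; the dataclass and the CLI values are made
# up for illustration. Each dataclass field becomes one argparse argument, and
# parse_args_into_dataclasses returns one populated instance per dataclass type.
if __name__ == "__main__":
    @dataclasses.dataclass
    class ExampleArguments:
        model_name: str = dataclasses.field(default="bert-base-uncased", metadata={"help": "Model identifier."})
        learning_rate: float = dataclasses.field(default=5e-5, metadata={"help": "Initial learning rate."})
        fp16: bool = dataclasses.field(default=False, metadata={"help": "Use mixed precision."})

    example_parser = HfArgumentParser(ExampleArguments)
    (example_args,) = example_parser.parse_args_into_dataclasses(args=["--learning_rate", "3e-5", "--fp16"])
    print(example_args)  # ExampleArguments(model_name='bert-base-uncased', learning_rate=3e-05, fp16=True)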
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class SpeechToTextTool(PipelineTool):
    default_checkpoint = "openai/whisper-base"
    description = (
        "This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
        "transcribed text."
    )
    name = "transcriber"
    pre_processor_class = WhisperProcessor
    model_class = WhisperForConditionalGeneration

    inputs = ["audio"]
    outputs = ["text"]

    def encode(self, audio):
        """simple docstring"""
        return self.pre_processor(audio, return_tensors="pt").input_features

    def forward(self, inputs):
        """simple docstring"""
        return self.model.generate(inputs=inputs)

    def decode(self, outputs):
        """simple docstring"""
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0]
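# A hedged usage sketch: PipelineTool instances are callable, and the Whisper processor
# accepts raw audio samples. The one-second silent 16 kHz array below is purely
# illustrative input; the checkpoint is downloaded on first use.
if __name__ == "__main__":
    import numpy as np

    transcriber = SpeechToTextTool()
    dummy_audio = np.zeros(16_000, dtype=np.float32)  # one second of silence at 16 kHz
    print(transcriber(dummy_audio))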
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class BioGptModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
def UpperCamelCase ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
if self.use_labels:
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_choices )
SCREAMING_SNAKE_CASE = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase ( self : Any ):
"""simple docstring"""
return BioGptConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=snake_case__ , initializer_range=self.initializer_range , )
def UpperCamelCase ( self : Tuple , snake_case__ : int , snake_case__ : Any , snake_case__ : int , snake_case__ : Optional[int] , snake_case__ : str , snake_case__ : List[str] , snake_case__ : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = BioGptModel(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ )
SCREAMING_SNAKE_CASE = model(snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase ( self : Any , snake_case__ : Dict , snake_case__ : List[str] , snake_case__ : str , snake_case__ : Optional[int] , snake_case__ : str , snake_case__ : Optional[int] , snake_case__ : int , snake_case__ : Dict , snake_case__ : str , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = BioGptForCausalLM(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase ( self : int , snake_case__ : Optional[int] , snake_case__ : Tuple , snake_case__ : int , snake_case__ : Any , snake_case__ : Dict , *snake_case__ : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = BioGptModel(config=snake_case__ )
model.to(snake_case__ )
model.eval()
# create attention mask
SCREAMING_SNAKE_CASE = torch.ones(input_ids.shape , dtype=torch.long , device=snake_case__ )
SCREAMING_SNAKE_CASE = self.seq_length // 2
SCREAMING_SNAKE_CASE = 0
# first forward pass
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ ).to_tuple()
# create hypothetical next token and extent to next_input_ids
SCREAMING_SNAKE_CASE = ids_tensor((self.batch_size, 1) , config.vocab_size )
# change a random masked slice from input_ids
SCREAMING_SNAKE_CASE = ids_tensor((1,) , snake_case__ ).item() + 1
SCREAMING_SNAKE_CASE = ids_tensor((self.batch_size, 1) , config.vocab_size ).squeeze(-1 )
SCREAMING_SNAKE_CASE = random_other_next_tokens
# append to next input_ids and attn_mask
SCREAMING_SNAKE_CASE = torch.cat([input_ids, next_tokens] , dim=-1 )
SCREAMING_SNAKE_CASE = torch.cat(
[attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=snake_case__ )] , dim=1 , )
# get two different outputs
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ )['last_hidden_state']
SCREAMING_SNAKE_CASE = model(snake_case__ , past_key_values=snake_case__ , attention_mask=snake_case__ )['last_hidden_state']
# select random slice
SCREAMING_SNAKE_CASE = ids_tensor((1,) , output_from_past.shape[-1] ).item()
SCREAMING_SNAKE_CASE = output_from_no_past[:, -1, random_slice_idx].detach()
SCREAMING_SNAKE_CASE = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(snake_case__ , snake_case__ , atol=1E-3 ) )
def UpperCamelCase ( self : Union[str, Any] , snake_case__ : Dict , snake_case__ : List[Any] , snake_case__ : str , snake_case__ : Tuple , snake_case__ : List[str] , *snake_case__ : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = BioGptModel(config=snake_case__ ).to(snake_case__ ).eval()
SCREAMING_SNAKE_CASE = torch.ones(input_ids.shape , dtype=torch.long , device=snake_case__ )
# first forward pass
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ , use_cache=snake_case__ )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = outputs.to_tuple()
# create hypothetical multiple next token and extent to next_input_ids
SCREAMING_SNAKE_CASE = ids_tensor((self.batch_size, 3) , config.vocab_size )
SCREAMING_SNAKE_CASE = ids_tensor((self.batch_size, 3) , 2 )
# append to next input_ids and
SCREAMING_SNAKE_CASE = torch.cat([input_ids, next_tokens] , dim=-1 )
SCREAMING_SNAKE_CASE = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ )['last_hidden_state']
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ , past_key_values=snake_case__ )[
'last_hidden_state'
]
# select random slice
SCREAMING_SNAKE_CASE = ids_tensor((1,) , output_from_past.shape[-1] ).item()
SCREAMING_SNAKE_CASE = output_from_no_past[:, -3:, random_slice_idx].detach()
SCREAMING_SNAKE_CASE = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(snake_case__ , snake_case__ , atol=1E-3 ) )
def UpperCamelCase ( self : Any , snake_case__ : int , snake_case__ : List[str] , snake_case__ : int , snake_case__ : List[Any] , snake_case__ : int , *snake_case__ : Optional[int] , snake_case__ : Optional[int]=False ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = BioGptForCausalLM(snake_case__ )
model.to(snake_case__ )
if gradient_checkpointing:
model.gradient_checkpointing_enable()
SCREAMING_SNAKE_CASE = model(snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
result.loss.backward()
def UpperCamelCase ( self : List[str] , snake_case__ : Dict , *snake_case__ : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = BioGptModel(snake_case__ )
SCREAMING_SNAKE_CASE = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers )
for key in model.state_dict().keys():
if "c_proj" in key and "weight" in key:
self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key] ) - model_std ) , 0.001 )
self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key] ) - 0.0 ) , 0.01 )
def UpperCamelCase ( self : Union[str, Any] , snake_case__ : Optional[Any] , snake_case__ : Union[str, Any] , snake_case__ : Union[str, Any] , snake_case__ : Dict , snake_case__ : Tuple , *snake_case__ : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.num_labels
SCREAMING_SNAKE_CASE = BioGptForTokenClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def prepare_config_and_inputs_for_common(self):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class BioGptModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (BioGptForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": BioGptModel,
            "text-classification": BioGptForSequenceClassification,
            "text-generation": BioGptForCausalLM,
            "token-classification": BioGptForTokenClassification,
            "zero-shot": BioGptForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False

    def setUp(self):
        """simple docstring"""
        self.model_tester = BioGptModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BioGptConfig, hidden_size=37)

    def test_config(self):
        """simple docstring"""
        self.config_tester.run_common_tests()

    def test_model(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_biogpt_model_attention_mask_past(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_model_attention_mask_past(*config_and_inputs)

    def test_biogpt_gradient_checkpointing(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_forward_and_backwards(*config_and_inputs, gradient_checkpointing=True)

    def test_biogpt_model_past_with_large_inputs(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_model_past_large_inputs(*config_and_inputs)

    def test_biogpt_weight_initialization(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_weight_initialization(*config_and_inputs)

    def test_biogpt_token_classification_model(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_for_token_classification(*config_and_inputs)
@slow
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = BioGptForCausalLM.from_pretrained('microsoft/biogpt' )
model.to(snake_case__ )
SCREAMING_SNAKE_CASE = BioGptTokenizer.from_pretrained('microsoft/biogpt' )
SCREAMING_SNAKE_CASE = 'left'
# Define PAD Token = EOS Token = 50256
SCREAMING_SNAKE_CASE = tokenizer.eos_token
SCREAMING_SNAKE_CASE = model.config.eos_token_id
# use different length sentences to test batching
SCREAMING_SNAKE_CASE = [
'Hello, my dog is a little',
'Today, I',
]
SCREAMING_SNAKE_CASE = tokenizer(snake_case__ , return_tensors='pt' , padding=snake_case__ )
SCREAMING_SNAKE_CASE = inputs['input_ids'].to(snake_case__ )
SCREAMING_SNAKE_CASE = model.generate(
input_ids=snake_case__ , attention_mask=inputs['attention_mask'].to(snake_case__ ) , )
SCREAMING_SNAKE_CASE = tokenizer(sentences[0] , return_tensors='pt' ).input_ids.to(snake_case__ )
SCREAMING_SNAKE_CASE = model.generate(input_ids=snake_case__ )
SCREAMING_SNAKE_CASE = inputs_non_padded.shape[-1] - inputs['attention_mask'][-1].long().sum().cpu().item()
SCREAMING_SNAKE_CASE = tokenizer(sentences[1] , return_tensors='pt' ).input_ids.to(snake_case__ )
SCREAMING_SNAKE_CASE = model.generate(input_ids=snake_case__ , max_length=model.config.max_length - num_paddings )
SCREAMING_SNAKE_CASE = tokenizer.batch_decode(snake_case__ , skip_special_tokens=snake_case__ )
SCREAMING_SNAKE_CASE = tokenizer.decode(output_non_padded[0] , skip_special_tokens=snake_case__ )
SCREAMING_SNAKE_CASE = tokenizer.decode(output_padded[0] , skip_special_tokens=snake_case__ )
SCREAMING_SNAKE_CASE = [
'Hello, my dog is a little bit bigger than a little bit.',
'Today, I have a good idea of how to use the information',
]
self.assertListEqual(snake_case__ , snake_case__ )
self.assertListEqual(snake_case__ , [non_padded_sentence, padded_sentence] )
@slow
def UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE = BioGptModel.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
def UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE = 3
SCREAMING_SNAKE_CASE = input_dict['input_ids']
SCREAMING_SNAKE_CASE = input_ids.ne(1 ).to(snake_case__ )
SCREAMING_SNAKE_CASE = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
SCREAMING_SNAKE_CASE = BioGptForSequenceClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ , labels=snake_case__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE = 3
SCREAMING_SNAKE_CASE = 'multi_label_classification'
SCREAMING_SNAKE_CASE = input_dict['input_ids']
SCREAMING_SNAKE_CASE = input_ids.ne(1 ).to(snake_case__ )
SCREAMING_SNAKE_CASE = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
SCREAMING_SNAKE_CASE = BioGptForSequenceClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ , labels=snake_case__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@require_torch
class UpperCamelCase ( unittest.TestCase ):
@slow
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = BioGptForCausalLM.from_pretrained('microsoft/biogpt' )
        input_ids = torch.tensor([[2, 4805, 9, 656, 21]])
        output = model(input_ids)[0]

        vocab_size = 42384

        expected_shape = torch.Size((1, 5, vocab_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[-9.5236, -9.8918, 10.4557], [-11.0469, -9.6423, 8.1022], [-8.8664, -7.8826, 5.5325]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
@slow
def UpperCamelCase ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = BioGptTokenizer.from_pretrained('microsoft/biogpt' )
SCREAMING_SNAKE_CASE = BioGptForCausalLM.from_pretrained('microsoft/biogpt' )
model.to(snake_case__ )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = tokenizer('COVID-19 is' , return_tensors='pt' ).to(snake_case__ )
SCREAMING_SNAKE_CASE = model.generate(
**snake_case__ , min_length=1_0_0 , max_length=1_0_2_4 , num_beams=5 , early_stopping=snake_case__ , )
SCREAMING_SNAKE_CASE = tokenizer.decode(output_ids[0] , skip_special_tokens=snake_case__ )
SCREAMING_SNAKE_CASE = (
'COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the'
' causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and'
' territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),'
' and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and'
' more than 800,000 deaths.'
)
self.assertEqual(snake_case__ , snake_case__ )
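# A standalone sketch of the invariant the past_key_values tests above assert: for a
# causal model, attending from the last position over the full prefix (the "cache")
# must reproduce the last row of a full-sequence forward pass. torch's built-in
# MultiheadAttention stands in for the model here purely for illustration; it is not
# BioGPT's actual attention implementation.
if __name__ == "__main__" and is_torch_available():
    torch.manual_seed(0)
    attention = torch.nn.MultiheadAttention(embed_dim=8, num_heads=2, batch_first=True).eval()
    x = torch.randn(1, 5, 8)
    # float mask with -inf above the diagonal enforces causality
    causal_mask = torch.triu(torch.full((5, 5), float("-inf")), diagonal=1)

    full_output, _ = attention(x, x, x, attn_mask=causal_mask, need_weights=False)
    # "cached" step: query only the last position, keys/values cover the whole prefix
    last_step, _ = attention(x[:, -1:], x, x, need_weights=False)
    print(torch.allclose(full_output[:, -1], last_step[:, 0], atol=1e-5))  # True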
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
NLTK_VERSION = version.parse(importlib_metadata.version("nltk"))
if NLTK_VERSION >= version.Version("3.6.4"):
    from nltk import word_tokenize


_CITATION = """\
@inproceedings{banarjee2005,
  title     = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},
  author    = {Banerjee, Satanjeev and Lavie, Alon},
  booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},
  month     = jun,
  year      = {2005},
  address   = {Ann Arbor, Michigan},
  publisher = {Association for Computational Linguistics},
  url       = {https://www.aclweb.org/anthology/W05-0909},
  pages     = {65--72},
}
"""

_DESCRIPTION = """\
METEOR, an automatic metric for machine translation evaluation
that is based on a generalized concept of unigram matching between the
machine-produced translation and human-produced reference translations.
Unigrams can be matched based on their surface forms, stemmed forms,
and meanings; furthermore, METEOR can be easily extended to include more
advanced matching strategies. Once all generalized unigram matches
between the two strings have been found, METEOR computes a score for
this matching using a combination of unigram-precision, unigram-recall, and
a measure of fragmentation that is designed to directly capture how
well-ordered the matched words in the machine translation are in relation
to the reference.

METEOR gets an R correlation value of 0.347 with human evaluation on the Arabic
data and 0.331 on the Chinese data. This is shown to be an improvement on
using simply unigram-precision, unigram-recall and their harmonic F1
combination.
"""

_KWARGS_DESCRIPTION = """
Computes METEOR score of translated segments against one or more references.
Args:
    predictions: list of predictions to score. Each prediction
        should be a string with tokens separated by spaces.
    references: list of reference for each prediction. Each
        reference should be a string with tokens separated by spaces.
    alpha: Parameter for controlling relative weights of precision and recall. default: 0.9
    beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3
    gamma: Relative weight assigned to fragmentation penalty. default: 0.5
Returns:
    'meteor': meteor score.
Examples:

    >>> meteor = datasets.load_metric('meteor')
    >>> predictions = ["It is a guide to action which ensures that the military always obeys the commands of the party"]
    >>> references = ["It is a guide to action that ensures that the military will forever heed Party commands"]
    >>> results = meteor.compute(predictions=predictions, references=references)
    >>> print(round(results["meteor"], 4))
    0.6944
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Meteor(datasets.Metric):
    def _info(self):
        """simple docstring"""
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py"],
            reference_urls=[
                "https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score",
                "https://en.wikipedia.org/wiki/METEOR",
            ],
        )

    def _download_and_prepare(self, dl_manager):
        """simple docstring"""
        import nltk

        nltk.download("wordnet")
        if NLTK_VERSION >= version.Version("3.6.5"):
            nltk.download("punkt")
        if NLTK_VERSION >= version.Version("3.6.6"):
            nltk.download("omw-1.4")

    def _compute(self, predictions, references, alpha=0.9, beta=3, gamma=0.5):
        """simple docstring"""
        if NLTK_VERSION >= version.Version("3.6.5"):
            scores = [
                meteor_score.single_meteor_score(
                    word_tokenize(ref), word_tokenize(pred), alpha=alpha, beta=beta, gamma=gamma
                )
                for ref, pred in zip(references, predictions)
            ]
        else:
            scores = [
                meteor_score.single_meteor_score(ref, pred, alpha=alpha, beta=beta, gamma=gamma)
                for ref, pred in zip(references, predictions)
            ]

        return {"meteor": np.mean(scores)}
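# A quick sanity check of the underlying nltk scorer, bypassing the datasets wrapper.
# It requires the wordnet/punkt downloads performed above; the exact value depends on
# the installed nltk version and should match the 0.6944 shown in the docstring for
# nltk >= 3.6.5.
if __name__ == "__main__":
    from nltk import word_tokenize as tokenize
    from nltk.translate.meteor_score import single_meteor_score

    ref = "It is a guide to action that ensures that the military will forever heed Party commands"
    pred = "It is a guide to action which ensures that the military always obeys the commands of the party"
    print(round(single_meteor_score(tokenize(ref), tokenize(pred)), 4))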
from functools import lru_cache
@lru_cache
def factorial(num: int) -> int:
    '''simple docstring'''
    if num < 0:
        raise ValueError("Number should not be negative.")

    return 1 if num in (0, 1) else num * factorial(num - 1)
if __name__ == "__main__":
import doctest
doctest.testmod()
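    # A small demonstration of what @lru_cache buys here: after factorial(10) fills
    # the cache, factorial(12) only needs two new recursive calls, which
    # cache_info() makes visible.
    print(factorial(10))  # 3628800
    print(factorial.cache_info())
    print(factorial(12))  # 479001600
    print(factorial.cache_info())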
import numpy as np
def sigmoid(vector: np.ndarray) -> np.ndarray:
    '''simple docstring'''
    return 1 / (1 + np.exp(-vector))


def sigmoid_linear_unit(vector: np.ndarray) -> np.ndarray:
    '''simple docstring'''
    return vector * sigmoid(vector)
if __name__ == "__main__":
import doctest
doctest.testmod()
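    # Example values for the two activations above (SiLU/swish is x * sigmoid(x)):
    xs = np.array([-2.0, 0.0, 2.0])
    print(sigmoid(xs))              # ~[0.1192 0.5    0.8808]
    print(sigmoid_linear_unit(xs))  # ~[-0.2384  0.      1.7616]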
from __future__ import annotations
from typing import Any
def generate_all_subsequences(sequence: list[Any]) -> None:
    '''simple docstring'''
    create_state_space_tree(sequence, [], 0)


def create_state_space_tree(sequence: list[Any], current_subsequence: list[Any], index: int) -> None:
    '''simple docstring'''
    if index == len(sequence):
        print(current_subsequence)
        return

    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.append(sequence[index])
    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.pop()
if __name__ == "__main__":
    seq: list[Any] = [3, 1, 2, 4]
generate_all_subsequences(seq)
seq.clear()
seq.extend(["A", "B", "C"])
generate_all_subsequences(seq)
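    # The state-space tree prints all 2**n subsequences of an n-element sequence
    # (each element is either skipped or kept at its index) - a quick count check:
    import io
    from contextlib import redirect_stdout

    buffer = io.StringIO()
    with redirect_stdout(buffer):
        generate_all_subsequences(list(range(4)))
    assert len(buffer.getvalue().splitlines()) == 2**4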
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)

VAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"Visual-Attention-Network/van-base": (
"https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json"
),
}
class VanConfig(PretrainedConfig):
    model_type = "van"

    def __init__(
        self,
        image_size=224,
        num_channels=3,
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        hidden_sizes=[64, 128, 320, 512],
        depths=[3, 3, 12, 3],
        mlp_ratios=[8, 8, 4, 4],
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-6,
        layer_scale_init_value=1e-2,
        drop_path_rate=0.0,
        dropout_rate=0.0,
        **kwargs,
    ):
        """simple docstring"""
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.mlp_ratios = mlp_ratios
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.dropout_rate = dropout_rate
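# A hedged construction example: the defaults mirror the van-base variant, and the
# per-stage hyperparameters (four stages, so four entries per list) can be overridden.
if __name__ == "__main__":
    configuration = VanConfig(hidden_sizes=[32, 64, 160, 256], depths=[2, 2, 4, 2])
    print(configuration.model_type, configuration.image_size, configuration.hidden_sizes)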
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class ResizeShortestEdge:
    def __init__(self, short_edge_length, max_size=sys.maxsize):
        '''simple docstring'''
        self.interp_method = "bilinear"
        self.max_size = max_size
        self.short_edge_length = short_edge_length

    def __call__(self, imgs):
        '''simple docstring'''
        img_augs = []
        for img in imgs:
            h, w = img.shape[:2]
            # later: provide list and randomly choose index for resize
            size = np.random.randint(self.short_edge_length[0], self.short_edge_length[1] + 1)
            if size == 0:
                return img
            scale = size * 1.0 / min(h, w)
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size
            if max(newh, neww) > self.max_size:
                scale = self.max_size * 1.0 / max(newh, neww)
                newh = newh * scale
                neww = neww * scale
            neww = int(neww + 0.5)
            newh = int(newh + 0.5)

            if img.dtype == np.uint8:
                pil_image = Image.fromarray(img)
                pil_image = pil_image.resize((neww, newh), PILImageResampling.BILINEAR)
                img = np.asarray(pil_image)
            else:
                img = img.permute(2, 0, 1).unsqueeze(0)  # hwc -> nchw
                img = nn.functional.interpolate(
                    img, (newh, neww), mode=self.interp_method, align_corners=False
                ).squeeze(0)
            img_augs.append(img)

        return img_augs
class Preprocess:
    def __init__(self, cfg):
        '''simple docstring'''
        self.aug = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST], cfg.INPUT.MAX_SIZE_TEST)
        self.input_format = cfg.INPUT.FORMAT
        self.size_divisibility = cfg.SIZE_DIVISIBILITY
        self.pad_value = cfg.PAD_VALUE
        self.max_image_size = cfg.INPUT.MAX_SIZE_TEST
        self.device = cfg.MODEL.DEVICE
        self.pixel_std = torch.tensor(cfg.MODEL.PIXEL_STD).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        self.pixel_mean = torch.tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        self.normalizer = lambda x: (x - self.pixel_mean) / self.pixel_std

    def pad(self, images):
        '''simple docstring'''
        max_size = tuple(max(s) for s in zip(*[img.shape for img in images]))
        image_sizes = [im.shape[-2:] for im in images]
        images = [
            nn.functional.pad(
                im,
                [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]],
                value=self.pad_value,
            )
            for size, im in zip(image_sizes, images)
        ]

        return torch.stack(images), torch.tensor(image_sizes)

    def __call__(self, images, single_image=False):
        '''simple docstring'''
        with torch.no_grad():
            if not isinstance(images, list):
                images = [images]
            if single_image:
                assert len(images) == 1
            for i in range(len(images)):
                if isinstance(images[i], torch.Tensor):
                    images.insert(i, images.pop(i).to(self.device).float())
                elif not isinstance(images[i], torch.Tensor):
                    images.insert(
                        i,
                        torch.as_tensor(img_tensorize(images.pop(i), input_format=self.input_format))
                        .to(self.device)
                        .float(),
                    )
            # resize smallest edge
            raw_sizes = torch.tensor([im.shape[:2] for im in images])
            images = self.aug(images)
            # transpose images and convert to torch tensors
            # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
            # now normalize before pad to avoid useless arithmetic
            images = [self.normalizer(x) for x in images]
            # now pad them to do the following operations
            images, sizes = self.pad(images)
            # Normalize
            if self.size_divisibility > 0:
                raise NotImplementedError()
            # pad
            scales_yx = torch.true_divide(raw_sizes, sizes)
            if single_image:
                return images[0], sizes[0], scales_yx[0]
            else:
                return images, sizes, scales_yx
def _scale_box(boxes, scale_yx):
    '''simple docstring'''
    boxes[:, 0::2] *= scale_yx[:, 1]
    boxes[:, 1::2] *= scale_yx[:, 0]
    return boxes


def _clip_box(tensor, box_size: Tuple[int, int]):
    '''simple docstring'''
    assert torch.isfinite(tensor).all(), "Box tensor contains infinite or NaN!"
    h, w = box_size
    tensor[:, 0].clamp_(min=0, max=w)
    tensor[:, 1].clamp_(min=0, max=h)
    tensor[:, 2].clamp_(min=0, max=w)
    tensor[:, 3].clamp_(min=0, max=h)
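# A small check of _clip_box: box coordinates are clamped in place to the image
# extent, with box_size given as (height, width).
if __name__ == "__main__":
    example_boxes = torch.tensor([[-5.0, 2.0, 30.0, 50.0]])
    _clip_box(example_boxes, (40, 20))
    print(example_boxes)  # tensor([[ 0.,  2., 20., 40.]])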
def create_ngram(sentence: str, ngram_size: int) -> list[str]:
    '''simple docstring'''
    return [sentence[i : i + ngram_size] for i in range(len(sentence) - ngram_size + 1)]
if __name__ == "__main__":
from doctest import testmod
testmod()
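    # Example: character-level n-grams of size 3 over a short string.
    print(create_ngram("hello", 3))  # ['hel', 'ell', 'llo']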
import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class FlaxAutoencoderKLTests(FlaxModelTesterMixin, unittest.TestCase):
    model_class = FlaxAutoencoderKL

    @property
    def dummy_input(self):
        """simple docstring"""
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        prng_key = jax.random.PRNGKey(0)
        image = jax.random.uniform(prng_key, ((batch_size, num_channels) + sizes))

        return {"sample": image, "prng_key": prng_key}

    def prepare_init_args_and_inputs_for_common(self):
        """simple docstring"""
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
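# The dummy input above is reproducible by construction: JAX's PRNG is purely
# functional, so the same key always yields the same sample.
if __name__ == "__main__" and is_flax_available():
    key = jax.random.PRNGKey(0)
    a = jax.random.uniform(key, (2, 2))
    b = jax.random.uniform(key, (2, 2))
    print(bool((a == b).all()))  # True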
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotObjectDetectionPipeline(ChunkPipeline):
    def __init__(self, **kwargs):
        """simple docstring"""
        super().__init__(**kwargs)

        if self.framework == "tf":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        requires_backends(self, "vision")
        self.check_model_type(MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING)

    def __call__(
        self,
        image: Union[str, "Image.Image", List[Dict[str, Any]]],
        candidate_labels: Union[str, List[str]] = None,
        **kwargs,
    ):
        """simple docstring"""
        if "text_queries" in kwargs:
            candidate_labels = kwargs.pop("text_queries")

        if isinstance(image, (str, Image.Image)):
            inputs = {"image": image, "candidate_labels": candidate_labels}
        else:
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def _sanitize_parameters(self, **kwargs):
        """simple docstring"""
        postprocess_params = {}
        if "threshold" in kwargs:
            postprocess_params["threshold"] = kwargs["threshold"]
        if "top_k" in kwargs:
            postprocess_params["top_k"] = kwargs["top_k"]
        return {}, {}, postprocess_params

    def preprocess(self, inputs):
        """simple docstring"""
        image = load_image(inputs["image"])
        candidate_labels = inputs["candidate_labels"]
        if isinstance(candidate_labels, str):
            candidate_labels = candidate_labels.split(",")

        target_size = torch.tensor([[image.height, image.width]], dtype=torch.int32)
        for i, candidate_label in enumerate(candidate_labels):
            text_inputs = self.tokenizer(candidate_label, return_tensors=self.framework)
            image_features = self.image_processor(image, return_tensors=self.framework)
            yield {
                "is_last": i == len(candidate_labels) - 1,
                "target_size": target_size,
                "candidate_label": candidate_label,
                **text_inputs,
                **image_features,
            }

    def _forward(self, model_inputs):
        """simple docstring"""
        target_size = model_inputs.pop("target_size")
        candidate_label = model_inputs.pop("candidate_label")
        is_last = model_inputs.pop("is_last")

        outputs = self.model(**model_inputs)

        model_outputs = {"target_size": target_size, "candidate_label": candidate_label, "is_last": is_last, **outputs}
        return model_outputs

    def postprocess(self, model_outputs, threshold=0.1, top_k=None):
        """simple docstring"""
        results = []
        for model_output in model_outputs:
            label = model_output["candidate_label"]
            model_output = BaseModelOutput(model_output)
            outputs = self.image_processor.post_process_object_detection(
                outputs=model_output, threshold=threshold, target_sizes=model_output["target_size"]
            )[0]

            for index in outputs["scores"].nonzero():
                score = outputs["scores"][index].item()
                box = self._get_bounding_box(outputs["boxes"][index][0])

                result = {"score": score, "label": label, "box": box}
                results.append(result)

        results = sorted(results, key=lambda x: x["score"], reverse=True)
        if top_k:
            results = results[:top_k]

        return results

    def _get_bounding_box(self, box: "torch.Tensor") -> Dict[str, int]:
        """simple docstring"""
        if self.framework != "pt":
            raise ValueError("The ZeroShotObjectDetectionPipeline is only available in PyTorch.")
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            "xmin": xmin,
            "ymin": ymin,
            "xmax": xmax,
            "ymax": ymax,
        }
        return bbox
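# A hedged usage sketch via the high-level pipeline API; the OwlViT checkpoint and the
# COCO image URL are the usual documentation examples, and the model download happens
# on first use.
if __name__ == "__main__":
    from transformers import pipeline

    detector = pipeline("zero-shot-object-detection", model="google/owlvit-base-patch32")
    predictions = detector(
        "http://images.cocodataset.org/val2017/000000039769.jpg",
        candidate_labels=["cat", "remote control"],
    )
    print(predictions[0])  # e.g. {'score': ..., 'label': 'cat', 'box': {'xmin': ..., ...}}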
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class BarkProcessorTest(unittest.TestCase):
    def setUp(self):
        """simple docstring"""
        self.checkpoint = "ylacombe/bark-small"
        self.tmpdirname = tempfile.mkdtemp()
        self.voice_preset = "en_speaker_1"
        self.input_string = "This is a test string"
        self.speaker_embeddings_dict_path = "speaker_embeddings_path.json"
        self.speaker_embeddings_directory = "speaker_embeddings"

    def get_tokenizer(self, **kwargs):
        """simple docstring"""
        return AutoTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        """simple docstring"""
        shutil.rmtree(self.tmpdirname)
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.get_tokenizer()
SCREAMING_SNAKE_CASE = BarkProcessor(tokenizer=snake_case__ )
processor.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE = BarkProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
@slow
def UpperCamelCase ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
processor.save_pretrained(
self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
SCREAMING_SNAKE_CASE = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
SCREAMING_SNAKE_CASE = BarkProcessor.from_pretrained(
self.tmpdirname , self.speaker_embeddings_dict_path , bos_token='(BOS)' , eos_token='(EOS)' , )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
def UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
SCREAMING_SNAKE_CASE = 3_5
SCREAMING_SNAKE_CASE = 2
SCREAMING_SNAKE_CASE = 8
SCREAMING_SNAKE_CASE = {
'semantic_prompt': np.ones(snake_case__ ),
'coarse_prompt': np.ones((nb_codebooks_coarse, seq_len) ),
'fine_prompt': np.ones((nb_codebooks_total, seq_len) ),
}
# test providing already loaded voice_preset
SCREAMING_SNAKE_CASE = processor(text=self.input_string , voice_preset=snake_case__ )
SCREAMING_SNAKE_CASE = inputs['history_prompt']
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(snake_case__ , np.array([] ) ).tolist() )
# test loading voice preset from npz file
SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , 'file.npz' )
np.savez(snake_case__ , **snake_case__ )
SCREAMING_SNAKE_CASE = processor(text=self.input_string , voice_preset=snake_case__ )
SCREAMING_SNAKE_CASE = inputs['history_prompt']
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(snake_case__ , np.array([] ) ).tolist() )
# test loading voice preset from the hub
SCREAMING_SNAKE_CASE = processor(text=self.input_string , voice_preset=self.voice_preset )
def UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.get_tokenizer()
SCREAMING_SNAKE_CASE = BarkProcessor(tokenizer=snake_case__ )
SCREAMING_SNAKE_CASE = processor(text=self.input_string )
SCREAMING_SNAKE_CASE = tokenizer(
self.input_string , padding='max_length' , max_length=2_5_6 , add_special_tokens=snake_case__ , return_attention_mask=snake_case__ , return_token_type_ids=snake_case__ , )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
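# A minimal usage sketch of the processor under test (assumes the checkpoint and
# voice preset above are reachable on the Hub):
#
#   processor = BarkProcessor.from_pretrained("ylacombe/bark-small")
#   inputs = processor("This is a test string", voice_preset="en_speaker_1")
#   # `inputs` carries the tokenized text plus a "history_prompt" voice preset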
| 673 |
def __lowerCAmelCase ( _UpperCamelCase : int = 10_00 ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE = 2**power
SCREAMING_SNAKE_CASE = str(_UpperCamelCase )
SCREAMING_SNAKE_CASE = list(_UpperCamelCase )
SCREAMING_SNAKE_CASE = 0
for i in list_num:
sum_of_num += int(_UpperCamelCase )
return sum_of_num
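# A worked example (the function is invoked as `solution` in the __main__ block
# below; 2**15 = 32768 and 3 + 2 + 7 + 6 + 8 = 26):
#
#   >>> solution(15)
#   26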
if __name__ == "__main__":
a_ : List[str] = int(input("Enter the power of 2: ").strip())
print("2 ^ ", power, " = ", 2**power)
a_ : int = solution(power)
print("Sum of the digits is: ", result)
| 673 | 1 |
import json
import logging
import os
import re
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import datasets
import numpy as np
import torch
import torchaudio
from packaging import version
from torch import nn
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaProcessor,
is_apex_available,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.6"):
a_ : Tuple = True
from torch.cuda.amp import autocast
a_ : int = logging.getLogger(__name__)
def __lowerCAmelCase ( _UpperCamelCase : Optional[Any]=None , _UpperCamelCase : int=None ) -> str:
'''simple docstring'''
return field(default_factory=lambda: default , metadata=_UpperCamelCase )
@dataclass
class UpperCamelCase :
__UpperCamelCase =field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
__UpperCamelCase =field(
default=SCREAMING_SNAKE_CASE , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
__UpperCamelCase =field(
default=SCREAMING_SNAKE_CASE , metadata={"help": "Whether to freeze the feature extractor layers of the model."} )
__UpperCamelCase =field(
default=0.1 , metadata={"help": "The dropout ratio for the attention probabilities."} )
__UpperCamelCase =field(
default=0.1 , metadata={"help": "The dropout ratio for activations inside the fully connected layer."} )
__UpperCamelCase =field(
default=0.1 , metadata={
"help": "The dropout probabilitiy for all fully connected layers in the embeddings, encoder, and pooler."
} , )
__UpperCamelCase =field(
default=0.1 , metadata={"help": "The dropout probabilitiy for all 1D convolutional layers in feature extractor."} , )
__UpperCamelCase =field(
default=0.05 , metadata={
"help": (
"Propability of each feature vector along the time axis to be chosen as the start of the vector"
"span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature"
"vectors will be masked along the time axis. This is only relevant if ``apply_spec_augment is True``."
)
} , )
__UpperCamelCase =field(default=0.0 , metadata={"help": "The LayerDrop probability."} )
@dataclass
class UpperCamelCase :
__UpperCamelCase =field(
default=SCREAMING_SNAKE_CASE , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} )
__UpperCamelCase =field(
default="train+validation" , metadata={
"help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
} , )
__UpperCamelCase =field(
default=SCREAMING_SNAKE_CASE , metadata={"help": "Overwrite the cached preprocessed datasets or not."} )
__UpperCamelCase =field(
default=SCREAMING_SNAKE_CASE , metadata={"help": "The number of processes to use for the preprocessing."} , )
__UpperCamelCase =field(
default=SCREAMING_SNAKE_CASE , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
} , )
__UpperCamelCase =field(
default=SCREAMING_SNAKE_CASE , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of validation examples to this "
"value if set."
)
} , )
__UpperCamelCase =list_field(
default=[",", "?", ".", "!", "-", ";", ":", "\"\"", "%", "'", "\"", "�"] , metadata={"help": "A list of characters to remove from the transcripts."} , )
@dataclass
class UpperCamelCase :
__UpperCamelCase =42
__UpperCamelCase =True
__UpperCamelCase =None
__UpperCamelCase =None
__UpperCamelCase =None
__UpperCamelCase =None
def __call__( self : Optional[Any] , snake_case__ : List[Dict[str, Union[List[int], torch.Tensor]]] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = [{'input_values': feature['input_values']} for feature in features]
SCREAMING_SNAKE_CASE = [{'input_ids': feature['labels']} for feature in features]
SCREAMING_SNAKE_CASE = self.processor.pad(
snake_case__ , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='pt' , )
SCREAMING_SNAKE_CASE = self.processor.pad(
labels=snake_case__ , padding=self.padding , max_length=self.max_length_labels , pad_to_multiple_of=self.pad_to_multiple_of_labels , return_tensors='pt' , )
# replace padding with -100 to ignore loss correctly
SCREAMING_SNAKE_CASE = labels_batch['input_ids'].masked_fill(labels_batch.attention_mask.ne(1 ) , -1_0_0 )
SCREAMING_SNAKE_CASE = labels
return batch
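# A toy sketch of the -100 masking performed above (labels at padded positions
# are replaced with -100, the index ignored when computing the loss):
#
#   >>> import torch
#   >>> ids = torch.tensor([[5, 7, 0, 0]])
#   >>> mask = torch.tensor([[1, 1, 0, 0]])
#   >>> ids.masked_fill(mask.ne(1), -100)
#   tensor([[   5,    7, -100, -100]])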
class UpperCamelCase ( SCREAMING_SNAKE_CASE ):
def UpperCamelCase ( self : List[str] , snake_case__ : nn.Module , snake_case__ : Dict[str, Union[torch.Tensor, Any]] ):
"""simple docstring"""
model.train()
SCREAMING_SNAKE_CASE = self._prepare_inputs(snake_case__ )
if self.use_amp:
with autocast():
SCREAMING_SNAKE_CASE = self.compute_loss(snake_case__ , snake_case__ )
else:
SCREAMING_SNAKE_CASE = self.compute_loss(snake_case__ , snake_case__ )
if self.args.n_gpu > 1:
if model.module.config.ctc_loss_reduction == "mean":
SCREAMING_SNAKE_CASE = loss.mean()
elif model.module.config.ctc_loss_reduction == "sum":
SCREAMING_SNAKE_CASE = loss.sum() / (inputs['labels'] >= 0).sum()
else:
raise ValueError(F"""{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']""" )
if self.args.gradient_accumulation_steps > 1:
SCREAMING_SNAKE_CASE = loss / self.args.gradient_accumulation_steps
if self.use_amp:
self.scaler.scale(snake_case__ ).backward()
elif self.use_apex:
with amp.scale_loss(snake_case__ , self.optimizer ) as scaled_loss:
scaled_loss.backward()
elif self.deepspeed:
self.deepspeed.backward(snake_case__ )
else:
loss.backward()
return loss.detach()
def __lowerCAmelCase ( ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
SCREAMING_SNAKE_CASE = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
SCREAMING_SNAKE_CASE = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None:
logger.info(
f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info('Training/evaluation parameters %s' , _UpperCamelCase )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets:
SCREAMING_SNAKE_CASE = datasets.load_dataset(
'common_voice' , data_args.dataset_config_name , split=data_args.train_split_name )
SCREAMING_SNAKE_CASE = datasets.load_dataset('common_voice' , data_args.dataset_config_name , split='test' )
# Create and save tokenizer
SCREAMING_SNAKE_CASE = f"""[{''.join(data_args.chars_to_ignore )}]"""
def remove_special_characters(_UpperCamelCase : Any ):
SCREAMING_SNAKE_CASE = re.sub(_UpperCamelCase , '' , batch['sentence'] ).lower() + ' '
return batch
SCREAMING_SNAKE_CASE = train_dataset.map(_UpperCamelCase , remove_columns=['sentence'] )
SCREAMING_SNAKE_CASE = eval_dataset.map(_UpperCamelCase , remove_columns=['sentence'] )
def extract_all_chars(_UpperCamelCase : List[Any] ):
SCREAMING_SNAKE_CASE = ' '.join(batch['text'] )
SCREAMING_SNAKE_CASE = list(set(_UpperCamelCase ) )
return {"vocab": [vocab], "all_text": [all_text]}
SCREAMING_SNAKE_CASE = train_dataset.map(
_UpperCamelCase , batched=_UpperCamelCase , batch_size=-1 , keep_in_memory=_UpperCamelCase , remove_columns=train_dataset.column_names , )
SCREAMING_SNAKE_CASE = train_dataset.map(
_UpperCamelCase , batched=_UpperCamelCase , batch_size=-1 , keep_in_memory=_UpperCamelCase , remove_columns=eval_dataset.column_names , )
SCREAMING_SNAKE_CASE = list(set(vocab_train['vocab'][0] ) | set(vocab_test['vocab'][0] ) )
SCREAMING_SNAKE_CASE = {v: k for k, v in enumerate(_UpperCamelCase )}
SCREAMING_SNAKE_CASE = vocab_dict[' ']
del vocab_dict[" "]
SCREAMING_SNAKE_CASE = len(_UpperCamelCase )
SCREAMING_SNAKE_CASE = len(_UpperCamelCase )
with open('vocab.json' , 'w' ) as vocab_file:
json.dump(_UpperCamelCase , _UpperCamelCase )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
SCREAMING_SNAKE_CASE = WavaVecaCTCTokenizer(
'vocab.json' , unk_token='[UNK]' , pad_token='[PAD]' , word_delimiter_token='|' , )
SCREAMING_SNAKE_CASE = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_60_00 , padding_value=0.0 , do_normalize=_UpperCamelCase , return_attention_mask=_UpperCamelCase )
SCREAMING_SNAKE_CASE = WavaVecaProcessor(feature_extractor=_UpperCamelCase , tokenizer=_UpperCamelCase )
SCREAMING_SNAKE_CASE = WavaVecaForCTC.from_pretrained(
model_args.model_name_or_path , cache_dir=model_args.cache_dir , activation_dropout=model_args.activation_dropout , attention_dropout=model_args.attention_dropout , hidden_dropout=model_args.hidden_dropout , feat_proj_dropout=model_args.feat_proj_dropout , mask_time_prob=model_args.mask_time_prob , gradient_checkpointing=training_args.gradient_checkpointing , layerdrop=model_args.layerdrop , ctc_loss_reduction='mean' , pad_token_id=processor.tokenizer.pad_token_id , vocab_size=len(processor.tokenizer ) , )
if data_args.max_train_samples is not None:
SCREAMING_SNAKE_CASE = min(len(_UpperCamelCase ) , data_args.max_train_samples )
SCREAMING_SNAKE_CASE = train_dataset.select(range(_UpperCamelCase ) )
if data_args.max_val_samples is not None:
SCREAMING_SNAKE_CASE = eval_dataset.select(range(data_args.max_val_samples ) )
SCREAMING_SNAKE_CASE = torchaudio.transforms.Resample(4_80_00 , 1_60_00 )
# Preprocessing the datasets.
# We need to read the audio files as arrays and tokenize the targets.
def speech_file_to_array_fn(_UpperCamelCase : Union[str, Any] ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = torchaudio.load(batch['path'] )
SCREAMING_SNAKE_CASE = resampler(_UpperCamelCase ).squeeze().numpy()
SCREAMING_SNAKE_CASE = 1_60_00
SCREAMING_SNAKE_CASE = batch['text']
return batch
SCREAMING_SNAKE_CASE = train_dataset.map(
_UpperCamelCase , remove_columns=train_dataset.column_names , num_proc=data_args.preprocessing_num_workers , )
SCREAMING_SNAKE_CASE = eval_dataset.map(
_UpperCamelCase , remove_columns=eval_dataset.column_names , num_proc=data_args.preprocessing_num_workers , )
def prepare_dataset(_UpperCamelCase : Union[str, Any] ):
# check that all files have the correct sampling rate
assert (
len(set(batch['sampling_rate'] ) ) == 1
), f"""Make sure all inputs have the same sampling rate of {processor.feature_extractor.sampling_rate}."""
SCREAMING_SNAKE_CASE = processor(
audio=batch['speech'] , text=batch['target_text'] , sampling_rate=batch['sampling_rate'][0] )
batch.update(_UpperCamelCase )
return batch
SCREAMING_SNAKE_CASE = train_dataset.map(
_UpperCamelCase , remove_columns=train_dataset.column_names , batch_size=training_args.per_device_train_batch_size , batched=_UpperCamelCase , num_proc=data_args.preprocessing_num_workers , )
SCREAMING_SNAKE_CASE = eval_dataset.map(
_UpperCamelCase , remove_columns=eval_dataset.column_names , batch_size=training_args.per_device_train_batch_size , batched=_UpperCamelCase , num_proc=data_args.preprocessing_num_workers , )
# Metric
SCREAMING_SNAKE_CASE = datasets.load_metric('wer' )
def compute_metrics(_UpperCamelCase : Dict ):
SCREAMING_SNAKE_CASE = pred.predictions
SCREAMING_SNAKE_CASE = np.argmax(_UpperCamelCase , axis=-1 )
SCREAMING_SNAKE_CASE = processor.tokenizer.pad_token_id
SCREAMING_SNAKE_CASE = processor.batch_decode(_UpperCamelCase )
# we do not want to group tokens when computing the metrics
SCREAMING_SNAKE_CASE = processor.batch_decode(pred.label_ids , group_tokens=_UpperCamelCase )
SCREAMING_SNAKE_CASE = wer_metric.compute(predictions=_UpperCamelCase , references=_UpperCamelCase )
return {"wer": wer}
if model_args.freeze_feature_extractor:
model.freeze_feature_extractor()
# Data collator
SCREAMING_SNAKE_CASE = DataCollatorCTCWithPadding(processor=_UpperCamelCase , padding=_UpperCamelCase )
# Initialize our Trainer
SCREAMING_SNAKE_CASE = CTCTrainer(
model=_UpperCamelCase , data_collator=_UpperCamelCase , args=_UpperCamelCase , compute_metrics=_UpperCamelCase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=processor.feature_extractor , )
# Training
if training_args.do_train:
if last_checkpoint is not None:
SCREAMING_SNAKE_CASE = last_checkpoint
elif os.path.isdir(model_args.model_name_or_path ):
SCREAMING_SNAKE_CASE = model_args.model_name_or_path
else:
SCREAMING_SNAKE_CASE = None
# Save the feature_extractor and the tokenizer
if is_main_process(training_args.local_rank ):
processor.save_pretrained(training_args.output_dir )
SCREAMING_SNAKE_CASE = trainer.train(resume_from_checkpoint=_UpperCamelCase )
trainer.save_model()
SCREAMING_SNAKE_CASE = train_result.metrics
SCREAMING_SNAKE_CASE = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(_UpperCamelCase )
)
SCREAMING_SNAKE_CASE = min(_UpperCamelCase , len(_UpperCamelCase ) )
trainer.log_metrics('train' , _UpperCamelCase )
trainer.save_metrics('train' , _UpperCamelCase )
trainer.save_state()
# Evaluation
SCREAMING_SNAKE_CASE = {}
if training_args.do_eval:
logger.info('*** Evaluate ***' )
SCREAMING_SNAKE_CASE = trainer.evaluate()
SCREAMING_SNAKE_CASE = data_args.max_val_samples if data_args.max_val_samples is not None else len(_UpperCamelCase )
SCREAMING_SNAKE_CASE = min(_UpperCamelCase , len(_UpperCamelCase ) )
trainer.log_metrics('eval' , _UpperCamelCase )
trainer.save_metrics('eval' , _UpperCamelCase )
return results
if __name__ == "__main__":
main()
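# A minimal launch sketch (hypothetical file name and arguments; the parser above
# also accepts a single path to a JSON config instead of CLI flags):
#
#   python run_common_voice.py \
#     --model_name_or_path facebook/wav2vec2-large-xlsr-53 \
#     --dataset_config_name tr \
#     --output_dir ./wav2vec2-xlsr-tr \
#     --do_train --do_eval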
| 673 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class UpperCamelCase ( SCREAMING_SNAKE_CASE ):
__UpperCamelCase ="facebook/bart-large-mnli"
__UpperCamelCase =(
"This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
"should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
"It returns the most likely label in the list of provided `labels` for the input text."
)
__UpperCamelCase ="text_classifier"
__UpperCamelCase =AutoTokenizer
__UpperCamelCase =AutoModelForSequenceClassification
__UpperCamelCase =["text", ["text"]]
__UpperCamelCase =["text"]
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
super().setup()
SCREAMING_SNAKE_CASE = self.model.config
SCREAMING_SNAKE_CASE = -1
for idx, label in config.idalabel.items():
if label.lower().startswith('entail' ):
SCREAMING_SNAKE_CASE = int(snake_case__ )
if self.entailment_id == -1:
raise ValueError('Could not determine the entailment ID from the model config, please pass it at init.' )
def UpperCamelCase ( self : Optional[Any] , snake_case__ : List[str] , snake_case__ : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = labels
return self.pre_processor(
[text] * len(snake_case__ ) , [F"""This example is {label}""" for label in labels] , return_tensors='pt' , padding='max_length' , )
def UpperCamelCase ( self : Dict , snake_case__ : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = outputs.logits
SCREAMING_SNAKE_CASE = torch.argmax(logits[:, 2] ).item()
return self._labels[label_id]
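# A minimal usage sketch (assumes the original class name `TextClassificationTool`
# and the agents convention of calling a tool instance directly):
#
#   classifier = TextClassificationTool()
#   classifier("This movie was great", labels=["positive", "negative"])
#   # -> "positive"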
| 673 | 1 |
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError("At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training")
# TF training parameters
a_ : Tuple = False
a_ : List[str] = False
def __lowerCAmelCase ( _UpperCamelCase : Namespace ) -> Optional[Any]:
'''simple docstring'''
return TrainCommand(_UpperCamelCase )
class UpperCamelCase ( SCREAMING_SNAKE_CASE ):
@staticmethod
def UpperCamelCase ( snake_case__ : ArgumentParser ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = parser.add_parser('train' , help='CLI tool to train a model on a task.' )
train_parser.add_argument(
'--train_data' , type=snake_case__ , required=snake_case__ , help='path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.' , )
train_parser.add_argument(
'--column_label' , type=snake_case__ , default=0 , help='Column of the dataset csv file with example labels.' )
train_parser.add_argument(
'--column_text' , type=snake_case__ , default=1 , help='Column of the dataset csv file with example texts.' )
train_parser.add_argument(
'--column_id' , type=snake_case__ , default=2 , help='Column of the dataset csv file with example ids.' )
train_parser.add_argument(
'--skip_first_row' , action='store_true' , help='Skip the first row of the csv file (headers).' )
train_parser.add_argument('--validation_data' , type=snake_case__ , default='' , help='path to validation dataset.' )
train_parser.add_argument(
'--validation_split' , type=snake_case__ , default=0.1 , help='if validation dataset is not provided, fraction of train dataset to use as validation dataset.' , )
train_parser.add_argument('--output' , type=snake_case__ , default='./' , help='path to saved the trained model.' )
train_parser.add_argument(
'--task' , type=snake_case__ , default='text_classification' , help='Task to train the model on.' )
train_parser.add_argument(
'--model' , type=snake_case__ , default='bert-base-uncased' , help='Model\'s name or path to stored model.' )
train_parser.add_argument('--train_batch_size' , type=snake_case__ , default=3_2 , help='Batch size for training.' )
train_parser.add_argument('--valid_batch_size' , type=snake_case__ , default=6_4 , help='Batch size for validation.' )
train_parser.add_argument('--learning_rate' , type=snake_case__ , default=3E-5 , help='Learning rate.' )
train_parser.add_argument('--adam_epsilon' , type=snake_case__ , default=1E-08 , help='Epsilon for Adam optimizer.' )
train_parser.set_defaults(func=snake_case__ )
def __init__( self : int , snake_case__ : Namespace ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = logging.get_logger('transformers-cli/training' )
SCREAMING_SNAKE_CASE = 'tf' if is_tf_available() else 'torch'
os.makedirs(args.output , exist_ok=snake_case__ )
SCREAMING_SNAKE_CASE = args.output
SCREAMING_SNAKE_CASE = args.column_label
SCREAMING_SNAKE_CASE = args.column_text
SCREAMING_SNAKE_CASE = args.column_id
self.logger.info(F"""Loading {args.task} pipeline for {args.model}""" )
if args.task == "text_classification":
SCREAMING_SNAKE_CASE = TextClassificationPipeline.from_pretrained(args.model )
elif args.task == "token_classification":
raise NotImplementedError
elif args.task == "question_answering":
raise NotImplementedError
self.logger.info(F"""Loading dataset from {args.train_data}""" )
SCREAMING_SNAKE_CASE = Processor.create_from_csv(
args.train_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
SCREAMING_SNAKE_CASE = None
if args.validation_data:
self.logger.info(F"""Loading validation dataset from {args.validation_data}""" )
SCREAMING_SNAKE_CASE = Processor.create_from_csv(
args.validation_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
SCREAMING_SNAKE_CASE = args.validation_split
SCREAMING_SNAKE_CASE = args.train_batch_size
SCREAMING_SNAKE_CASE = args.valid_batch_size
SCREAMING_SNAKE_CASE = args.learning_rate
SCREAMING_SNAKE_CASE = args.adam_epsilon
def UpperCamelCase ( self : int ):
"""simple docstring"""
if self.framework == "tf":
return self.run_tf()
return self.run_torch()
def UpperCamelCase ( self : List[str] ):
"""simple docstring"""
raise NotImplementedError
def UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
self.pipeline.fit(
self.train_dataset , validation_data=self.valid_dataset , validation_split=self.validation_split , learning_rate=self.learning_rate , adam_epsilon=self.adam_epsilon , train_batch_size=self.train_batch_size , valid_batch_size=self.valid_batch_size , )
# Save trained pipeline
self.pipeline.save_pretrained(self.output )
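# A minimal CLI sketch (hypothetical paths; mirrors the arguments wired up in
# `register_subcommand` above):
#
#   transformers-cli train \
#     --train_data ./train.csv \
#     --column_label 0 --column_text 1 --column_id 2 \
#     --model bert-base-uncased --output ./model_out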
| 673 |
# Note: if you intend to run this script make sure you look under scripts/fsmt/
# to locate the appropriate script to do the work correctly. There is a set of scripts to:
# - download and prepare data and run the conversion script
# - perform eval to get the best hparam into the config
# - generate model_cards - useful if you have multiple models from the same paper
import argparse
import json
import os
import re
from collections import OrderedDict
from os.path import basename, dirname
import fairseq
import torch
from fairseq import hub_utils
from fairseq.data.dictionary import Dictionary
from transformers import FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
a_ : str = 2
# based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping`
# values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults:
#
# * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users)
# * `early_stopping`: `False` consistently scored better
# * `length_penalty` varied, so will assign the best one depending on the model
a_ : int = {
# fairseq:
"wmt19-ru-en": {"length_penalty": 1.1},
"wmt19-en-ru": {"length_penalty": 1.1_5},
"wmt19-en-de": {"length_penalty": 1.0},
"wmt19-de-en": {"length_penalty": 1.1},
# allenai:
"wmt16-en-de-dist-12-1": {"length_penalty": 0.6},
"wmt16-en-de-dist-6-1": {"length_penalty": 0.6},
"wmt16-en-de-12-1": {"length_penalty": 0.8},
"wmt19-de-en-6-6-base": {"length_penalty": 0.6},
"wmt19-de-en-6-6-big": {"length_penalty": 0.6},
}
# this remaps the different models to their organization names
a_ : str = {}
for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
a_ : List[Any] = "facebook"
for m in [
"wmt16-en-de-dist-12-1",
"wmt16-en-de-dist-6-1",
"wmt16-en-de-12-1",
"wmt19-de-en-6-6-base",
"wmt19-de-en-6-6-big",
]:
a_ : Any = "allenai"
def __lowerCAmelCase ( _UpperCamelCase : List[Any] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE = dict((re.sub(R'@@$' , '' , _UpperCamelCase ), v) if k.endswith('@@' ) else (re.sub(R'$' , '</w>' , _UpperCamelCase ), v) for k, v in d.items() )
SCREAMING_SNAKE_CASE = '<s> <pad> </s> <unk>'.split()
# restore the special tokens
for k in keep_keys:
del da[f"""{k}</w>"""]
SCREAMING_SNAKE_CASE = d[k] # restore
return da
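# A worked example of rewrite_dict_keys, as it is invoked below (a toy dict; the
# four special tokens must be present, since their rewritten "</w>" forms are
# deleted and then restored):
#
#   >>> d = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, "hallo@@": 5, "welt": 6}
#   >>> rewrite_dict_keys(d)
#   {'hallo': 5, 'welt</w>': 6, '<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}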
def __lowerCAmelCase ( _UpperCamelCase : Dict , _UpperCamelCase : List[str] ) -> Dict:
'''simple docstring'''
assert os.path.exists(_UpperCamelCase )
os.makedirs(_UpperCamelCase , exist_ok=_UpperCamelCase )
print(f"""Writing results to {pytorch_dump_folder_path}""" )
# handle various types of models
SCREAMING_SNAKE_CASE = basename(_UpperCamelCase )
SCREAMING_SNAKE_CASE = dirname(_UpperCamelCase )
SCREAMING_SNAKE_CASE = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel
SCREAMING_SNAKE_CASE = cls.hub_models()
SCREAMING_SNAKE_CASE = {'bpe': 'fastbpe', 'tokenizer': 'moses'}
SCREAMING_SNAKE_CASE = '.'
# note: since the model dump is old, fairseq has upgraded its model some
# time later, and it does a whole lot of rewrites and splits on the saved
# weights, therefore we can't use torch.load() directly on the model file.
# see: upgrade_state_dict(state_dict) in fairseq_model.py
print(f"""using checkpoint {checkpoint_file}""" )
SCREAMING_SNAKE_CASE = hub_utils.from_pretrained(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , archive_map=_UpperCamelCase , **_UpperCamelCase )
SCREAMING_SNAKE_CASE = vars(chkpt['args']['model'] )
SCREAMING_SNAKE_CASE = args['source_lang']
SCREAMING_SNAKE_CASE = args['target_lang']
SCREAMING_SNAKE_CASE = dirname(_UpperCamelCase )
SCREAMING_SNAKE_CASE = basename(_UpperCamelCase )
# dicts
SCREAMING_SNAKE_CASE = os.path.join(_UpperCamelCase , f"""dict.{src_lang}.txt""" )
SCREAMING_SNAKE_CASE = os.path.join(_UpperCamelCase , f"""dict.{tgt_lang}.txt""" )
SCREAMING_SNAKE_CASE = Dictionary.load(_UpperCamelCase )
SCREAMING_SNAKE_CASE = rewrite_dict_keys(src_dict.indices )
SCREAMING_SNAKE_CASE = len(_UpperCamelCase )
SCREAMING_SNAKE_CASE = os.path.join(_UpperCamelCase , 'vocab-src.json' )
print(f"""Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records""" )
with open(_UpperCamelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(_UpperCamelCase , ensure_ascii=_UpperCamelCase , indent=_UpperCamelCase ) )
# detect whether this is a do_lower_case situation, which can be derived by checking whether we
# have at least one uppercase letter in the source vocab
SCREAMING_SNAKE_CASE = True
for k in src_vocab.keys():
if not k.islower():
SCREAMING_SNAKE_CASE = False
break
SCREAMING_SNAKE_CASE = Dictionary.load(_UpperCamelCase )
SCREAMING_SNAKE_CASE = rewrite_dict_keys(tgt_dict.indices )
SCREAMING_SNAKE_CASE = len(_UpperCamelCase )
SCREAMING_SNAKE_CASE = os.path.join(_UpperCamelCase , 'vocab-tgt.json' )
print(f"""Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records""" )
with open(_UpperCamelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(_UpperCamelCase , ensure_ascii=_UpperCamelCase , indent=_UpperCamelCase ) )
# merges_file (bpecodes)
SCREAMING_SNAKE_CASE = os.path.join(_UpperCamelCase , VOCAB_FILES_NAMES['merges_file'] )
for fn in ["bpecodes", "code"]: # older fairseq called the merges file "code"
SCREAMING_SNAKE_CASE = os.path.join(_UpperCamelCase , _UpperCamelCase )
if os.path.exists(_UpperCamelCase ):
break
with open(_UpperCamelCase , encoding='utf-8' ) as fin:
SCREAMING_SNAKE_CASE = fin.read()
SCREAMING_SNAKE_CASE = re.sub(R' \d+$' , '' , _UpperCamelCase , 0 , re.M ) # remove frequency number
print(f"""Generating {merges_file}""" )
with open(_UpperCamelCase , 'w' , encoding='utf-8' ) as fout:
fout.write(_UpperCamelCase )
# model config
SCREAMING_SNAKE_CASE = os.path.join(_UpperCamelCase , 'config.json' )
# validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe -
# may have to modify the tokenizer if a different type is used by a future model
assert args["bpe"] == "fastbpe", f"""need to extend tokenizer to support bpe={args['bpe']}"""
assert args["tokenizer"] == "moses", f"""need to extend tokenizer to support bpe={args['tokenizer']}"""
SCREAMING_SNAKE_CASE = {
'architectures': ['FSMTForConditionalGeneration'],
'model_type': 'fsmt',
'activation_dropout': args['activation_dropout'],
'activation_function': 'relu',
'attention_dropout': args['attention_dropout'],
'd_model': args['decoder_embed_dim'],
'dropout': args['dropout'],
'init_std': 0.02,
'max_position_embeddings': args['max_source_positions'],
'num_hidden_layers': args['encoder_layers'],
'src_vocab_size': src_vocab_size,
'tgt_vocab_size': tgt_vocab_size,
'langs': [src_lang, tgt_lang],
'encoder_attention_heads': args['encoder_attention_heads'],
'encoder_ffn_dim': args['encoder_ffn_embed_dim'],
'encoder_layerdrop': args['encoder_layerdrop'],
'encoder_layers': args['encoder_layers'],
'decoder_attention_heads': args['decoder_attention_heads'],
'decoder_ffn_dim': args['decoder_ffn_embed_dim'],
'decoder_layerdrop': args['decoder_layerdrop'],
'decoder_layers': args['decoder_layers'],
'bos_token_id': 0,
'pad_token_id': 1,
'eos_token_id': 2,
'is_encoder_decoder': True,
'scale_embedding': not args['no_scale_embedding'],
'tie_word_embeddings': args['share_all_embeddings'],
}
# good hparam defaults to start with
SCREAMING_SNAKE_CASE = 5
SCREAMING_SNAKE_CASE = False
if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]:
SCREAMING_SNAKE_CASE = best_score_hparams[model_dir]['length_penalty']
else:
SCREAMING_SNAKE_CASE = 1.0
print(f"""Generating {fsmt_model_config_file}""" )
with open(_UpperCamelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(_UpperCamelCase , ensure_ascii=_UpperCamelCase , indent=_UpperCamelCase ) )
# tokenizer config
SCREAMING_SNAKE_CASE = os.path.join(_UpperCamelCase , _UpperCamelCase )
SCREAMING_SNAKE_CASE = {
'langs': [src_lang, tgt_lang],
'model_max_length': 10_24,
'do_lower_case': do_lower_case,
}
print(f"""Generating {fsmt_tokenizer_config_file}""" )
with open(_UpperCamelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(_UpperCamelCase , ensure_ascii=_UpperCamelCase , indent=_UpperCamelCase ) )
# model
SCREAMING_SNAKE_CASE = chkpt['models'][0]
SCREAMING_SNAKE_CASE = model.state_dict()
# rename keys to start with 'model.'
SCREAMING_SNAKE_CASE = OrderedDict(('model.' + k, v) for k, v in model_state_dict.items() )
# remove unneeded keys
SCREAMING_SNAKE_CASE = [
'model.model',
'model.encoder.version',
'model.decoder.version',
'model.encoder_embed_tokens.weight',
'model.decoder_embed_tokens.weight',
'model.encoder.embed_positions._float_tensor',
'model.decoder.embed_positions._float_tensor',
]
for k in ignore_keys:
model_state_dict.pop(_UpperCamelCase , _UpperCamelCase )
SCREAMING_SNAKE_CASE = FSMTConfig.from_pretrained(_UpperCamelCase )
SCREAMING_SNAKE_CASE = FSMTForConditionalGeneration(_UpperCamelCase )
# check that it loads ok
model_new.load_state_dict(_UpperCamelCase , strict=_UpperCamelCase )
# save
SCREAMING_SNAKE_CASE = os.path.join(_UpperCamelCase , _UpperCamelCase )
print(f"""Generating {pytorch_weights_dump_path}""" )
torch.save(_UpperCamelCase , _UpperCamelCase )
print('Conversion is done!' )
print('\nLast step is to upload the files to s3' )
print(f"""cd {data_root}""" )
print(f"""transformers-cli upload {model_dir}""" )
if __name__ == "__main__":
a_ : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--fsmt_checkpoint_path",
default=None,
type=str,
required=True,
help=(
"Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"
" bpecodes, etc."
),
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
a_ : int = parser.parse_args()
convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
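# A minimal invocation sketch (hypothetical script name and paths; the two flags
# match the required arguments declared above):
#
#   python convert_fsmt_original_pytorch_checkpoint_to_pytorch.py \
#     --fsmt_checkpoint_path ./wmt19.ru-en.ensemble/model4.pt \
#     --pytorch_dump_folder_path ./fsmt-wmt19-ru-en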
| 673 | 1 |
import unittest
from transformers import AutoTokenizer, NystromformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
NystromformerModel,
)
from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
class UpperCamelCase :
def __init__( self : List[str] , snake_case__ : Dict , snake_case__ : Optional[Any]=1_3 , snake_case__ : Union[str, Any]=7 , snake_case__ : List[str]=True , snake_case__ : Any=True , snake_case__ : List[str]=True , snake_case__ : Optional[Any]=True , snake_case__ : List[str]=9_9 , snake_case__ : str=3_2 , snake_case__ : Dict=5 , snake_case__ : str=4 , snake_case__ : int=3_7 , snake_case__ : Union[str, Any]="gelu" , snake_case__ : Any=0.1 , snake_case__ : Dict=0.1 , snake_case__ : Optional[Any]=5_1_2 , snake_case__ : List[Any]=1_6 , snake_case__ : str=2 , snake_case__ : int=0.02 , snake_case__ : List[str]=3 , snake_case__ : Dict=4 , snake_case__ : str=None , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = parent
SCREAMING_SNAKE_CASE = batch_size
SCREAMING_SNAKE_CASE = seq_length
SCREAMING_SNAKE_CASE = is_training
SCREAMING_SNAKE_CASE = use_input_mask
SCREAMING_SNAKE_CASE = use_token_type_ids
SCREAMING_SNAKE_CASE = use_labels
SCREAMING_SNAKE_CASE = vocab_size
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = intermediate_size
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = hidden_dropout_prob
SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE = max_position_embeddings
SCREAMING_SNAKE_CASE = type_vocab_size
SCREAMING_SNAKE_CASE = type_sequence_label_size
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = num_labels
SCREAMING_SNAKE_CASE = num_choices
SCREAMING_SNAKE_CASE = scope
def UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
if self.use_labels:
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_choices )
SCREAMING_SNAKE_CASE = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
return NystromformerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=snake_case__ , initializer_range=self.initializer_range , )
def UpperCamelCase ( self : List[str] , snake_case__ : Any , snake_case__ : Tuple , snake_case__ : Union[str, Any] , snake_case__ : Dict , snake_case__ : List[str] , snake_case__ : int , snake_case__ : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = NystromformerModel(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ )
SCREAMING_SNAKE_CASE = model(snake_case__ , token_type_ids=snake_case__ )
SCREAMING_SNAKE_CASE = model(snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase ( self : List[Any] , snake_case__ : str , snake_case__ : Dict , snake_case__ : Dict , snake_case__ : Dict , snake_case__ : int , snake_case__ : int , snake_case__ : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = NystromformerForMaskedLM(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase ( self : Union[str, Any] , snake_case__ : Tuple , snake_case__ : Optional[Any] , snake_case__ : int , snake_case__ : Optional[int] , snake_case__ : str , snake_case__ : List[str] , snake_case__ : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = NystromformerForQuestionAnswering(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(
snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCamelCase ( self : List[str] , snake_case__ : int , snake_case__ : int , snake_case__ : int , snake_case__ : str , snake_case__ : Optional[int] , snake_case__ : Any , snake_case__ : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.num_labels
SCREAMING_SNAKE_CASE = NystromformerForSequenceClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase ( self : Tuple , snake_case__ : int , snake_case__ : Tuple , snake_case__ : List[str] , snake_case__ : List[Any] , snake_case__ : Optional[Any] , snake_case__ : List[Any] , snake_case__ : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.num_labels
SCREAMING_SNAKE_CASE = NystromformerForTokenClassification(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase ( self : int , snake_case__ : Optional[Any] , snake_case__ : List[str] , snake_case__ : Union[str, Any] , snake_case__ : Tuple , snake_case__ : Optional[int] , snake_case__ : Any , snake_case__ : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.num_choices
SCREAMING_SNAKE_CASE = NystromformerForMultipleChoice(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE = model(
snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = config_and_inputs
SCREAMING_SNAKE_CASE = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class UpperCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ):
__UpperCamelCase =(
(
NystromformerModel,
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
)
if is_torch_available()
else ()
)
__UpperCamelCase =(
{
"feature-extraction": NystromformerModel,
"fill-mask": NystromformerForMaskedLM,
"question-answering": NystromformerForQuestionAnswering,
"text-classification": NystromformerForSequenceClassification,
"token-classification": NystromformerForTokenClassification,
"zero-shot": NystromformerForSequenceClassification,
}
if is_torch_available()
else {}
)
__UpperCamelCase =False
__UpperCamelCase =False
def UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = NystromformerModelTester(self )
SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=snake_case__ , hidden_size=3_7 )
def UpperCamelCase ( self : List[str] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case__ )
def UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
SCREAMING_SNAKE_CASE = type
self.model_tester.create_and_check_model(*snake_case__ )
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*snake_case__ )
def UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*snake_case__ )
def UpperCamelCase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*snake_case__ )
def UpperCamelCase ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*snake_case__ )
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*snake_case__ )
@slow
def UpperCamelCase ( self : List[str] ):
"""simple docstring"""
for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE = NystromformerModel.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
@require_torch
class UpperCamelCase ( unittest.TestCase ):
@slow
def UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = NystromformerModel.from_pretrained('uw-madison/nystromformer-512' )
SCREAMING_SNAKE_CASE = torch.tensor([[0, 1, 2, 3, 4, 5]] )
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(snake_case__ )[0]
SCREAMING_SNAKE_CASE = torch.Size((1, 6, 7_6_8) )
self.assertEqual(output.shape , snake_case__ )
SCREAMING_SNAKE_CASE = torch.tensor(
[[[-0.4_532, -0.0_936, 0.5_137], [-0.2_676, 0.0_628, 0.6_186], [-0.3_629, -0.1_726, 0.4_716]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , snake_case__ , atol=1E-4 ) )
@slow
def UpperCamelCase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = 'the [MASK] of Belgium is Brussels'
SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained('uw-madison/nystromformer-512' )
SCREAMING_SNAKE_CASE = NystromformerForMaskedLM.from_pretrained('uw-madison/nystromformer-512' )
SCREAMING_SNAKE_CASE = tokenizer(snake_case__ , return_tensors='pt' )
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(encoding.input_ids ).logits
SCREAMING_SNAKE_CASE = token_logits[:, 2, :].argmax(-1 )[0]
self.assertEqual(tokenizer.decode(snake_case__ ) , 'capital' )
| 673 |
import random
def __lowerCAmelCase ( _UpperCamelCase : int , _UpperCamelCase : float , _UpperCamelCase : bool = False ) -> dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE = {i: [] for i in range(_UpperCamelCase )}
# if probability is greater than or equal to 1, generate a complete graph
if probability >= 1:
return complete_graph(_UpperCamelCase )
# if probability is lower or equal than 0, then return a graph without edges
if probability <= 0:
return graph
# for each pair of nodes i and j, add an edge from i to j
# if the randomly generated number is less than the given probability
for i in range(_UpperCamelCase ):
for j in range(i + 1 , _UpperCamelCase ):
if random.random() < probability:
graph[i].append(_UpperCamelCase )
if not directed:
# if the graph is undirected, also add the reverse edge from j to i
graph[j].append(_UpperCamelCase )
return graph
def __lowerCAmelCase ( _UpperCamelCase : int ) -> dict:
'''simple docstring'''
return {
i: [j for j in range(_UpperCamelCase ) if i != j] for i in range(_UpperCamelCase )
}
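# A minimal usage sketch (assuming the two functions above were originally named
# `random_graph` and `complete_graph`; with a fixed seed the output is
# deterministic under CPython's default Mersenne Twister):
#
#   >>> random.seed(1)
#   >>> random_graph(4, 0.5)
#   {0: [1], 1: [0, 2, 3], 2: [1, 3], 3: [1, 2]}
#   >>> complete_graph(3)
#   {0: [1, 2], 1: [0, 2], 2: [0, 1]}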
if __name__ == "__main__":
import doctest
doctest.testmod()
| 673 | 1 |
def __lowerCAmelCase ( _UpperCamelCase : str = "The quick brown fox jumps over the lazy dog" , ) -> bool:
'''simple docstring'''
SCREAMING_SNAKE_CASE = set()
# Replace all the whitespace in our sentence
SCREAMING_SNAKE_CASE = input_str.replace(' ' , '' )
for alpha in input_str:
if "a" <= alpha.lower() <= "z":
frequency.add(alpha.lower() )
return len(_UpperCamelCase ) == 26
def __lowerCAmelCase ( _UpperCamelCase : str = "The quick brown fox jumps over the lazy dog" , ) -> bool:
'''simple docstring'''
SCREAMING_SNAKE_CASE = [False] * 26
for char in input_str:
if char.islower():
flag[ord(char ) - 97] = True
elif char.isupper():
flag[ord(char ) - 65] = True
return all(flag )
def __lowerCAmelCase ( _UpperCamelCase : str = "The quick brown fox jumps over the lazy dog" , ) -> bool:
'''simple docstring'''
return len({char for char in input_str.lower() if char.isalpha()} ) == 26
def __lowerCAmelCase ( ) -> None:
'''simple docstring'''
from timeit import timeit
SCREAMING_SNAKE_CASE = 'from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest'
print(timeit('is_pangram()' , setup=_UpperCamelCase ) )
print(timeit('is_pangram_faster()' , setup=_UpperCamelCase ) )
print(timeit('is_pangram_fastest()' , setup=_UpperCamelCase ) )
# 5.348480500048026, 2.6477354579837993, 1.8470395830227062
# 5.036091582966037, 2.644472333951853, 1.8869528750656173
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
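# Quick sanity checks (a sketch; `is_pangram_fastest` is the set-comprehension
# variant named in the timeit setup string above):
#
#   >>> is_pangram_fastest("The quick brown fox jumps over the lazy dog")
#   True
#   >>> is_pangram_fastest("Hello world")
#   False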
| 673 |
import unittest
from transformers import AutoTokenizer, NystromformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
NystromformerModel,
)
from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
class UpperCamelCase :
def __init__( self : List[str] , snake_case__ : Dict , snake_case__ : Optional[Any]=1_3 , snake_case__ : Union[str, Any]=7 , snake_case__ : List[str]=True , snake_case__ : Any=True , snake_case__ : List[str]=True , snake_case__ : Optional[Any]=True , snake_case__ : List[str]=9_9 , snake_case__ : str=3_2 , snake_case__ : Dict=5 , snake_case__ : str=4 , snake_case__ : int=3_7 , snake_case__ : Union[str, Any]="gelu" , snake_case__ : Any=0.1 , snake_case__ : Dict=0.1 , snake_case__ : Optional[Any]=5_1_2 , snake_case__ : List[Any]=1_6 , snake_case__ : str=2 , snake_case__ : int=0.02 , snake_case__ : List[str]=3 , snake_case__ : Dict=4 , snake_case__ : str=None , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = parent
SCREAMING_SNAKE_CASE = batch_size
SCREAMING_SNAKE_CASE = seq_length
SCREAMING_SNAKE_CASE = is_training
SCREAMING_SNAKE_CASE = use_input_mask
SCREAMING_SNAKE_CASE = use_token_type_ids
SCREAMING_SNAKE_CASE = use_labels
SCREAMING_SNAKE_CASE = vocab_size
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = intermediate_size
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = hidden_dropout_prob
SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE = max_position_embeddings
SCREAMING_SNAKE_CASE = type_vocab_size
SCREAMING_SNAKE_CASE = type_sequence_label_size
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = num_labels
SCREAMING_SNAKE_CASE = num_choices
SCREAMING_SNAKE_CASE = scope
def UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
if self.use_labels:
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_choices )
SCREAMING_SNAKE_CASE = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
return NystromformerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=snake_case__ , initializer_range=self.initializer_range , )
def UpperCamelCase ( self : List[str] , snake_case__ : Any , snake_case__ : Tuple , snake_case__ : Union[str, Any] , snake_case__ : Dict , snake_case__ : List[str] , snake_case__ : int , snake_case__ : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = NystromformerModel(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ )
SCREAMING_SNAKE_CASE = model(snake_case__ , token_type_ids=snake_case__ )
SCREAMING_SNAKE_CASE = model(snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase ( self : List[Any] , snake_case__ : str , snake_case__ : Dict , snake_case__ : Dict , snake_case__ : Dict , snake_case__ : int , snake_case__ : int , snake_case__ : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = NystromformerForMaskedLM(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase ( self : Union[str, Any] , snake_case__ : Tuple , snake_case__ : Optional[Any] , snake_case__ : int , snake_case__ : Optional[int] , snake_case__ : str , snake_case__ : List[str] , snake_case__ : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = NystromformerForQuestionAnswering(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(
snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCamelCase ( self : List[str] , snake_case__ : int , snake_case__ : int , snake_case__ : int , snake_case__ : str , snake_case__ : Optional[int] , snake_case__ : Any , snake_case__ : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.num_labels
SCREAMING_SNAKE_CASE = NystromformerForSequenceClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase ( self : Tuple , snake_case__ : int , snake_case__ : Tuple , snake_case__ : List[str] , snake_case__ : List[Any] , snake_case__ : Optional[Any] , snake_case__ : List[Any] , snake_case__ : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.num_labels
SCREAMING_SNAKE_CASE = NystromformerForTokenClassification(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase ( self : int , snake_case__ : Optional[Any] , snake_case__ : List[str] , snake_case__ : Union[str, Any] , snake_case__ : Tuple , snake_case__ : Optional[int] , snake_case__ : Any , snake_case__ : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.num_choices
SCREAMING_SNAKE_CASE = NystromformerForMultipleChoice(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE = model(
snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
        ((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) = config_and_inputs
SCREAMING_SNAKE_CASE = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class UpperCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ):
__UpperCamelCase =(
(
NystromformerModel,
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
)
if is_torch_available()
else ()
)
__UpperCamelCase =(
{
"feature-extraction": NystromformerModel,
"fill-mask": NystromformerForMaskedLM,
"question-answering": NystromformerForQuestionAnswering,
"text-classification": NystromformerForSequenceClassification,
"token-classification": NystromformerForTokenClassification,
"zero-shot": NystromformerForSequenceClassification,
}
if is_torch_available()
else {}
)
__UpperCamelCase =False
__UpperCamelCase =False
def UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = NystromformerModelTester(self )
SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=snake_case__ , hidden_size=3_7 )
def UpperCamelCase ( self : List[str] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case__ )
def UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
SCREAMING_SNAKE_CASE = type
self.model_tester.create_and_check_model(*snake_case__ )
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*snake_case__ )
def UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*snake_case__ )
def UpperCamelCase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*snake_case__ )
def UpperCamelCase ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*snake_case__ )
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*snake_case__ )
@slow
def UpperCamelCase ( self : List[str] ):
"""simple docstring"""
for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            SCREAMING_SNAKE_CASE = NystromformerModel.from_pretrained(model_name )
self.assertIsNotNone(snake_case__ )
@require_torch
class UpperCamelCase ( unittest.TestCase ):
@slow
def UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = NystromformerModel.from_pretrained('uw-madison/nystromformer-512' )
SCREAMING_SNAKE_CASE = torch.tensor([[0, 1, 2, 3, 4, 5]] )
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(snake_case__ )[0]
SCREAMING_SNAKE_CASE = torch.Size((1, 6, 7_6_8) )
self.assertEqual(output.shape , snake_case__ )
SCREAMING_SNAKE_CASE = torch.tensor(
[[[-0.4_532, -0.0_936, 0.5_137], [-0.2_676, 0.0_628, 0.6_186], [-0.3_629, -0.1_726, 0.4_716]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , snake_case__ , atol=1E-4 ) )
@slow
def UpperCamelCase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = 'the [MASK] of Belgium is Brussels'
SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained('uw-madison/nystromformer-512' )
SCREAMING_SNAKE_CASE = NystromformerForMaskedLM.from_pretrained('uw-madison/nystromformer-512' )
SCREAMING_SNAKE_CASE = tokenizer(snake_case__ , return_tensors='pt' )
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(encoding.input_ids ).logits
SCREAMING_SNAKE_CASE = token_logits[:, 2, :].argmax(-1 )[0]
self.assertEqual(tokenizer.decode(snake_case__ ) , 'capital' )
| 673 | 1 |
import os
import unittest
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
BertTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class UpperCamelCase ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
__UpperCamelCase =BertTokenizer
__UpperCamelCase =BertTokenizerFast
__UpperCamelCase =True
__UpperCamelCase =True
__UpperCamelCase =filter_non_english
def UpperCamelCase ( self : Any ):
"""simple docstring"""
super().setUp()
SCREAMING_SNAKE_CASE = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def UpperCamelCase ( self : Any , snake_case__ : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = 'UNwant\u00E9d,running'
SCREAMING_SNAKE_CASE = 'unwanted, running'
return input_text, output_text
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.tokenizer_class(self.vocab_file )
SCREAMING_SNAKE_CASE = tokenizer.tokenize('UNwant\u00E9d,running' )
self.assertListEqual(snake_case__ , ['un', '##want', '##ed', ',', 'runn', '##ing'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case__ ) , [9, 6, 7, 1_2, 1_0, 1_1] )
def UpperCamelCase ( self : str ):
"""simple docstring"""
if not self.test_rust_tokenizer:
return
SCREAMING_SNAKE_CASE = self.get_tokenizer()
SCREAMING_SNAKE_CASE = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE = 'UNwant\u00E9d,running'
SCREAMING_SNAKE_CASE = tokenizer.tokenize(snake_case__ )
SCREAMING_SNAKE_CASE = rust_tokenizer.tokenize(snake_case__ )
self.assertListEqual(snake_case__ , snake_case__ )
SCREAMING_SNAKE_CASE = tokenizer.encode(snake_case__ , add_special_tokens=snake_case__ )
SCREAMING_SNAKE_CASE = rust_tokenizer.encode(snake_case__ , add_special_tokens=snake_case__ )
self.assertListEqual(snake_case__ , snake_case__ )
SCREAMING_SNAKE_CASE = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE = tokenizer.encode(snake_case__ )
SCREAMING_SNAKE_CASE = rust_tokenizer.encode(snake_case__ )
self.assertListEqual(snake_case__ , snake_case__ )
# With lower casing
SCREAMING_SNAKE_CASE = self.get_tokenizer(do_lower_case=snake_case__ )
SCREAMING_SNAKE_CASE = self.get_rust_tokenizer(do_lower_case=snake_case__ )
SCREAMING_SNAKE_CASE = 'UNwant\u00E9d,running'
SCREAMING_SNAKE_CASE = tokenizer.tokenize(snake_case__ )
SCREAMING_SNAKE_CASE = rust_tokenizer.tokenize(snake_case__ )
self.assertListEqual(snake_case__ , snake_case__ )
SCREAMING_SNAKE_CASE = tokenizer.encode(snake_case__ , add_special_tokens=snake_case__ )
SCREAMING_SNAKE_CASE = rust_tokenizer.encode(snake_case__ , add_special_tokens=snake_case__ )
self.assertListEqual(snake_case__ , snake_case__ )
SCREAMING_SNAKE_CASE = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE = tokenizer.encode(snake_case__ )
SCREAMING_SNAKE_CASE = rust_tokenizer.encode(snake_case__ )
self.assertListEqual(snake_case__ , snake_case__ )
def UpperCamelCase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz' ) , ['ah', '\u535A', '\u63A8', 'zz'] )
def UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = BasicTokenizer(do_lower_case=snake_case__ )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['hello', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = BasicTokenizer(do_lower_case=snake_case__ , strip_accents=snake_case__ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hällo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['h\u00E9llo'] )
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = BasicTokenizer(do_lower_case=snake_case__ , strip_accents=snake_case__ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def UpperCamelCase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = BasicTokenizer(do_lower_case=snake_case__ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = BasicTokenizer(do_lower_case=snake_case__ )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'] )
def UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = BasicTokenizer(do_lower_case=snake_case__ , strip_accents=snake_case__ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HäLLo', '!', 'how', 'Are', 'yoU', '?'] )
def UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = BasicTokenizer(do_lower_case=snake_case__ , strip_accents=snake_case__ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HaLLo', '!', 'how', 'Are', 'yoU', '?'] )
def UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = BasicTokenizer(do_lower_case=snake_case__ , never_split=['[UNK]'] )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? [UNK]' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'] )
def UpperCamelCase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = BasicTokenizer()
SCREAMING_SNAKE_CASE = 'a\n\'ll !!to?\'d of, can\'t.'
SCREAMING_SNAKE_CASE = ['a', '\'', 'll', '!', '!', 'to', '?', '\'', 'd', 'of', ',', 'can', '\'', 't', '.']
self.assertListEqual(tokenizer.tokenize(snake_case__ ) , snake_case__ )
def UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = ['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing']
SCREAMING_SNAKE_CASE = {}
for i, token in enumerate(snake_case__ ):
SCREAMING_SNAKE_CASE = i
SCREAMING_SNAKE_CASE = WordpieceTokenizer(vocab=snake_case__ , unk_token='[UNK]' )
self.assertListEqual(tokenizer.tokenize('' ) , [] )
self.assertListEqual(tokenizer.tokenize('unwanted running' ) , ['un', '##want', '##ed', 'runn', '##ing'] )
self.assertListEqual(tokenizer.tokenize('unwantedX running' ) , ['[UNK]', 'runn', '##ing'] )
def UpperCamelCase ( self : Tuple ):
"""simple docstring"""
self.assertTrue(_is_whitespace(' ' ) )
self.assertTrue(_is_whitespace('\t' ) )
self.assertTrue(_is_whitespace('\r' ) )
self.assertTrue(_is_whitespace('\n' ) )
self.assertTrue(_is_whitespace('\u00A0' ) )
self.assertFalse(_is_whitespace('A' ) )
self.assertFalse(_is_whitespace('-' ) )
def UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
self.assertTrue(_is_control('\u0005' ) )
self.assertFalse(_is_control('A' ) )
self.assertFalse(_is_control(' ' ) )
self.assertFalse(_is_control('\t' ) )
self.assertFalse(_is_control('\r' ) )
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
self.assertTrue(_is_punctuation('-' ) )
self.assertTrue(_is_punctuation('$' ) )
self.assertTrue(_is_punctuation('`' ) )
self.assertTrue(_is_punctuation('.' ) )
self.assertFalse(_is_punctuation('A' ) )
self.assertFalse(_is_punctuation(' ' ) )
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.get_tokenizer()
SCREAMING_SNAKE_CASE = self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] )
        self.assertListEqual(
            [rust_tokenizer.tokenize(t ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] )
@slow
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.tokenizer_class.from_pretrained('bert-base-uncased' )
SCREAMING_SNAKE_CASE = tokenizer.encode('sequence builders' , add_special_tokens=snake_case__ )
SCREAMING_SNAKE_CASE = tokenizer.encode('multi-sequence build' , add_special_tokens=snake_case__ )
SCREAMING_SNAKE_CASE = tokenizer.build_inputs_with_special_tokens(snake_case__ )
SCREAMING_SNAKE_CASE = tokenizer.build_inputs_with_special_tokens(snake_case__ , snake_case__ )
assert encoded_sentence == [1_0_1] + text + [1_0_2]
assert encoded_pair == [1_0_1] + text + [1_0_2] + text_a + [1_0_2]
def UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
                SCREAMING_SNAKE_CASE = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
SCREAMING_SNAKE_CASE = F"""A, naïve {tokenizer_r.mask_token} AllenNLP sentence."""
SCREAMING_SNAKE_CASE = tokenizer_r.encode_plus(
snake_case__ , return_attention_mask=snake_case__ , return_token_type_ids=snake_case__ , return_offsets_mapping=snake_case__ , add_special_tokens=snake_case__ , )
SCREAMING_SNAKE_CASE = tokenizer_r.do_lower_case if hasattr(snake_case__ , 'do_lower_case' ) else False
SCREAMING_SNAKE_CASE = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), 'A'),
((1, 2), ','),
((3, 5), 'na'),
((5, 6), '##ï'),
((6, 8), '##ve'),
((9, 1_5), tokenizer_r.mask_token),
((1_6, 2_1), 'Allen'),
((2_1, 2_3), '##NL'),
((2_3, 2_4), '##P'),
((2_5, 3_3), 'sentence'),
((3_3, 3_4), '.'),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), 'a'),
((1, 2), ','),
((3, 8), 'naive'),
((9, 1_5), tokenizer_r.mask_token),
((1_6, 2_1), 'allen'),
((2_1, 2_3), '##nl'),
((2_3, 2_4), '##p'),
((2_5, 3_3), 'sentence'),
((3_3, 3_4), '.'),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['input_ids'] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['offset_mapping'] )
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = ['的', '人', '有']
SCREAMING_SNAKE_CASE = ''.join(snake_case__ )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
SCREAMING_SNAKE_CASE = True
                SCREAMING_SNAKE_CASE = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                SCREAMING_SNAKE_CASE = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
SCREAMING_SNAKE_CASE = tokenizer_p.encode(snake_case__ , add_special_tokens=snake_case__ )
SCREAMING_SNAKE_CASE = tokenizer_r.encode(snake_case__ , add_special_tokens=snake_case__ )
SCREAMING_SNAKE_CASE = tokenizer_r.convert_ids_to_tokens(snake_case__ )
SCREAMING_SNAKE_CASE = tokenizer_p.convert_ids_to_tokens(snake_case__ )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(snake_case__ , snake_case__ )
self.assertListEqual(snake_case__ , snake_case__ )
SCREAMING_SNAKE_CASE = False
                SCREAMING_SNAKE_CASE = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                SCREAMING_SNAKE_CASE = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs )
SCREAMING_SNAKE_CASE = tokenizer_r.encode(snake_case__ , add_special_tokens=snake_case__ )
SCREAMING_SNAKE_CASE = tokenizer_p.encode(snake_case__ , add_special_tokens=snake_case__ )
SCREAMING_SNAKE_CASE = tokenizer_r.convert_ids_to_tokens(snake_case__ )
SCREAMING_SNAKE_CASE = tokenizer_p.convert_ids_to_tokens(snake_case__ )
# it is expected that only the first Chinese character is not preceded by "##".
SCREAMING_SNAKE_CASE = [
F"""##{token}""" if idx != 0 else token for idx, token in enumerate(snake_case__ )
]
self.assertListEqual(snake_case__ , snake_case__ )
self.assertListEqual(snake_case__ , snake_case__ )
| 673 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
ImageTextPipelineOutput,
UniDiffuserPipeline,
)
else:
from .modeling_text_decoder import UniDiffuserTextDecoder
from .modeling_uvit import UniDiffuserModel, UTransformeraDModel
from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
| 673 | 1 |
from __future__ import annotations
def __lowerCAmelCase ( _UpperCamelCase : int | float | str , _UpperCamelCase : int | float | str ) -> list[str]:
'''simple docstring'''
if nth_term == "":
return [""]
SCREAMING_SNAKE_CASE = int(_UpperCamelCase )
SCREAMING_SNAKE_CASE = int(_UpperCamelCase )
SCREAMING_SNAKE_CASE = []
for temp in range(int(_UpperCamelCase ) ):
series.append(f"""1 / {pow(temp + 1 , int(_UpperCamelCase ) )}""" if series else '1' )
return series
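# Example (assuming integer inputs): p_series(5, 2) returns
# ['1', '1 / 4', '1 / 9', '1 / 16', '1 / 25'].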
if __name__ == "__main__":
import doctest
doctest.testmod()
a_ : str = int(input("Enter the last number (nth term) of the P-Series"))
a_ : str = int(input("Enter the power for P-Series"))
print("Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p")
print(p_series(nth_term, power))
| 673 |
import heapq
import sys
import numpy as np
a_ : Optional[int] = tuple[int, int]
class UpperCamelCase :
def __init__( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = set()
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
if not self.empty():
return self.elements[0][0]
else:
return float('inf' )
def UpperCamelCase ( self : List[str] ):
"""simple docstring"""
return len(self.elements ) == 0
def UpperCamelCase ( self : Union[str, Any] , snake_case__ : Optional[Any] , snake_case__ : List[Any] ):
"""simple docstring"""
if item not in self.set:
heapq.heappush(self.elements , (priority, item) )
self.set.add(snake_case__ )
else:
# update
# print("update", item)
SCREAMING_SNAKE_CASE = []
((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) = heapq.heappop(self.elements )
while x != item:
temp.append((pri, x) )
((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) = heapq.heappop(self.elements )
temp.append((priority, item) )
for pro, xxx in temp:
heapq.heappush(self.elements , (pro, xxx) )
def UpperCamelCase ( self : Dict , snake_case__ : Dict ):
"""simple docstring"""
if item in self.set:
self.set.remove(snake_case__ )
SCREAMING_SNAKE_CASE = []
((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) = heapq.heappop(self.elements )
while x != item:
temp.append((pro, x) )
((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) = heapq.heappop(self.elements )
for prito, yyy in temp:
heapq.heappush(self.elements , (prito, yyy) )
def UpperCamelCase ( self : str ):
"""simple docstring"""
return self.elements[0][1]
def UpperCamelCase ( self : Tuple ):
"""simple docstring"""
((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) = heapq.heappop(self.elements )
self.set.remove(snake_case__ )
return (priority, item)
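# Illustrative usage sketch (not part of the original module): put() either
# inserts a new item or, when the item is already queued, re-heapifies it with
# the new priority.
#
#   pq = PriorityQueue()
#   pq.put((0, 0), 5)
#   pq.put((1, 1), 3)
#   pq.put((0, 0), 1)   # (0, 0) is already queued, so its priority drops to 1
#   pq.minkey()         # -> 1
#   pq.get()            # -> (1, (0, 0))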
def __lowerCAmelCase ( _UpperCamelCase : TPos , _UpperCamelCase : TPos ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = np.array(_UpperCamelCase )
SCREAMING_SNAKE_CASE = np.array(_UpperCamelCase )
return np.linalg.norm(a - b )
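# e.g. the Euclidean distance between (0, 0) and (3, 4) is 5.0.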
def __lowerCAmelCase ( _UpperCamelCase : TPos , _UpperCamelCase : TPos ) -> Dict:
'''simple docstring'''
return consistent_heuristic(_UpperCamelCase , _UpperCamelCase ) // t
def __lowerCAmelCase ( _UpperCamelCase : TPos , _UpperCamelCase : TPos ) -> Optional[int]:
'''simple docstring'''
return abs(p[0] - goal[0] ) + abs(p[1] - goal[1] )
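# e.g. the Manhattan distance between (0, 0) and (3, 4) is |0 - 3| + |0 - 4| = 7.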
def __lowerCAmelCase ( _UpperCamelCase : TPos , _UpperCamelCase : int , _UpperCamelCase : TPos , _UpperCamelCase : dict[TPos, float] ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = g_function[start] + Wa * heuristics[i](_UpperCamelCase , _UpperCamelCase )
return ans
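# i.e. the weighted-A* priority f_i(s) = g(s) + Wa * h_i(s, goal) for heuristic i.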
def __lowerCAmelCase ( _UpperCamelCase : List[str] , _UpperCamelCase : int , _UpperCamelCase : Tuple ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE = np.chararray((n, n) )
for i in range(_UpperCamelCase ):
for j in range(_UpperCamelCase ):
SCREAMING_SNAKE_CASE = '*'
for i in range(_UpperCamelCase ):
for j in range(_UpperCamelCase ):
if (j, (n - 1) - i) in blocks:
SCREAMING_SNAKE_CASE = '#'
SCREAMING_SNAKE_CASE = '-'
SCREAMING_SNAKE_CASE = back_pointer[goal]
while x != start:
((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) = x
# print(x)
SCREAMING_SNAKE_CASE = '-'
SCREAMING_SNAKE_CASE = back_pointer[x]
SCREAMING_SNAKE_CASE = '-'
for i in range(_UpperCamelCase ):
for j in range(_UpperCamelCase ):
if (i, j) == (0, n - 1):
print(grid[i][j] , end=' ' )
print('<-- End position' , end=' ' )
else:
print(grid[i][j] , end=' ' )
print()
print('^' )
print('Start position' )
print()
print('# is an obstacle' )
print('- is the path taken by algorithm' )
print('PATH TAKEN BY THE ALGORITHM IS:-' )
SCREAMING_SNAKE_CASE = back_pointer[goal]
while x != start:
print(_UpperCamelCase , end=' ' )
SCREAMING_SNAKE_CASE = back_pointer[x]
print(_UpperCamelCase )
sys.exit()
def __lowerCAmelCase ( _UpperCamelCase : TPos ) -> Any:
'''simple docstring'''
if p[0] < 0 or p[0] > n - 1:
return False
if p[1] < 0 or p[1] > n - 1:
return False
return True
def __lowerCAmelCase ( _UpperCamelCase : List[Any] , _UpperCamelCase : Tuple , _UpperCamelCase : Any , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Tuple , _UpperCamelCase : List[Any] , _UpperCamelCase : Optional[Any] , ) -> List[Any]:
'''simple docstring'''
for itera in range(_UpperCamelCase ):
open_list[itera].remove_element(_UpperCamelCase )
# print("s", s)
# print("j", j)
((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) = s
SCREAMING_SNAKE_CASE = (x - 1, y)
SCREAMING_SNAKE_CASE = (x + 1, y)
SCREAMING_SNAKE_CASE = (x, y + 1)
SCREAMING_SNAKE_CASE = (x, y - 1)
for neighbours in [left, right, up, down]:
if neighbours not in blocks:
if valid(_UpperCamelCase ) and neighbours not in visited:
# print("neighbour", neighbours)
visited.add(_UpperCamelCase )
SCREAMING_SNAKE_CASE = -1
SCREAMING_SNAKE_CASE = float('inf' )
if valid(_UpperCamelCase ) and g_function[neighbours] > g_function[s] + 1:
SCREAMING_SNAKE_CASE = g_function[s] + 1
SCREAMING_SNAKE_CASE = s
if neighbours not in close_list_anchor:
open_list[0].put(_UpperCamelCase , key(_UpperCamelCase , 0 , _UpperCamelCase , _UpperCamelCase ) )
if neighbours not in close_list_inad:
for var in range(1 , _UpperCamelCase ):
if key(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) <= Wa * key(
_UpperCamelCase , 0 , _UpperCamelCase , _UpperCamelCase ):
open_list[j].put(
_UpperCamelCase , key(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) )
def __lowerCAmelCase ( ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE = []
for x in range(1 , 5 ):
for y in range(1 , 6 ):
some_list.append((x, y) )
for x in range(15 , 20 ):
some_list.append((x, 17) )
for x in range(10 , 19 ):
for y in range(1 , 15 ):
some_list.append((x, y) )
# L block
for x in range(1 , 4 ):
for y in range(12 , 19 ):
some_list.append((x, y) )
for x in range(3 , 13 ):
for y in range(16 , 19 ):
some_list.append((x, y) )
return some_list
a_ : str = {0: consistent_heuristic, 1: heuristic_a, 2: heuristic_a}
a_ : List[str] = [
(0, 1),
(1, 1),
(2, 1),
(3, 1),
(4, 1),
(5, 1),
(6, 1),
(7, 1),
(8, 1),
(9, 1),
(10, 1),
(11, 1),
(12, 1),
(13, 1),
(14, 1),
(15, 1),
(16, 1),
(17, 1),
(18, 1),
(19, 1),
]
a_ : Union[str, Any] = make_common_ground()
a_ : Tuple = blocks_blk
# hyper parameters
a_ : Any = 1
a_ : List[str] = 1
a_ : Union[str, Any] = 20
a_ : Optional[Any] = 3 # one consistent and two other inconsistent
# start and end destination
a_ : int = (0, 0)
a_ : Optional[int] = (n - 1, n - 1)
a_ : Union[str, Any] = 1
def __lowerCAmelCase ( _UpperCamelCase : TPos , _UpperCamelCase : TPos , _UpperCamelCase : int ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = {start: 0, goal: float('inf' )}
SCREAMING_SNAKE_CASE = {start: -1, goal: -1}
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = set()
for i in range(_UpperCamelCase ):
open_list.append(PriorityQueue() )
open_list[i].put(_UpperCamelCase , key(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) )
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = []
while open_list[0].minkey() < float('inf' ):
for i in range(1 , _UpperCamelCase ):
# print(open_list[0].minkey(), open_list[i].minkey())
if open_list[i].minkey() <= Wa * open_list[0].minkey():
global t
t += 1
if g_function[goal] <= open_list[i].minkey():
if g_function[goal] < float('inf' ):
do_something(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
else:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = open_list[i].top_show()
visited.add(_UpperCamelCase )
expand_state(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , )
close_list_inad.append(_UpperCamelCase )
else:
if g_function[goal] <= open_list[0].minkey():
if g_function[goal] < float('inf' ):
do_something(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
else:
SCREAMING_SNAKE_CASE = open_list[0].top_show()
visited.add(_UpperCamelCase )
expand_state(
_UpperCamelCase , 0 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , )
close_list_anchor.append(_UpperCamelCase )
print('No path found to goal' )
print()
for i in range(n - 1 , -1 , -1 ):
for j in range(_UpperCamelCase ):
if (j, i) in blocks:
print('#' , end=' ' )
elif (j, i) in back_pointer:
if (j, i) == (n - 1, n - 1):
print('*' , end=' ' )
else:
print('-' , end=' ' )
else:
print('*' , end=' ' )
if (j, i) == (n - 1, n - 1):
print('<-- End position' , end=' ' )
print()
print('^' )
print('Start position' )
print()
print('# is an obstacle' )
print('- is the path taken by algorithm' )
if __name__ == "__main__":
multi_a_star(start, goal, n_heuristic)
| 673 | 1 |
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
class UpperCamelCase :
@staticmethod
def UpperCamelCase ( *snake_case__ : List[str] , **snake_case__ : List[str] ):
"""simple docstring"""
pass
def __lowerCAmelCase ( _UpperCamelCase : Image ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE = hashlib.mda(image.tobytes() )
return m.hexdigest()[:10]
def __lowerCAmelCase ( _UpperCamelCase : Image ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE = np.array(_UpperCamelCase )
SCREAMING_SNAKE_CASE = npimg.shape
return {"hash": hashimage(_UpperCamelCase ), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class UpperCamelCase ( unittest.TestCase ):
__UpperCamelCase =dict(
(list(MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if MODEL_FOR_MASK_GENERATION_MAPPING else []) )
__UpperCamelCase =dict(
(list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) )
def UpperCamelCase ( self : List[Any] , snake_case__ : str , snake_case__ : Dict , snake_case__ : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = MaskGenerationPipeline(model=snake_case__ , image_processor=snake_case__ )
return image_segmenter, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def UpperCamelCase ( self : int , snake_case__ : Tuple , snake_case__ : Any ):
"""simple docstring"""
pass
@require_tf
@unittest.skip('Image segmentation not implemented in TF' )
def UpperCamelCase ( self : List[str] ):
"""simple docstring"""
pass
@slow
@require_torch
def UpperCamelCase ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = pipeline('mask-generation' , model='facebook/sam-vit-huge' )
SCREAMING_SNAKE_CASE = image_segmenter('http://images.cocodataset.org/val2017/000000039769.jpg' , points_per_batch=2_5_6 )
# Shortening by hashing
SCREAMING_SNAKE_CASE = []
for i, o in enumerate(outputs['masks'] ):
new_outupt += [{"mask": mask_to_test_readable(snake_case__ ), "scores": outputs["scores"][i]}]
# fmt: off
self.assertEqual(
nested_simplify(snake_case__ , decimals=4 ) , [
{'mask': {'hash': '115ad19f5f', 'shape': (4_8_0, 6_4_0)}, 'scores': 1.0_444},
{'mask': {'hash': '6affa964c6', 'shape': (4_8_0, 6_4_0)}, 'scores': 1.021},
{'mask': {'hash': 'dfe28a0388', 'shape': (4_8_0, 6_4_0)}, 'scores': 1.0_167},
{'mask': {'hash': 'c0a5f4a318', 'shape': (4_8_0, 6_4_0)}, 'scores': 1.0_132},
{'mask': {'hash': 'fe8065c197', 'shape': (4_8_0, 6_4_0)}, 'scores': 1.0_053},
{'mask': {'hash': 'e2d0b7a0b7', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.9_967},
{'mask': {'hash': '453c7844bd', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.993},
{'mask': {'hash': '3d44f2926d', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.9_909},
{'mask': {'hash': '64033ddc3f', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.9_879},
{'mask': {'hash': '801064ff79', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.9_834},
{'mask': {'hash': '6172f276ef', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.9_716},
{'mask': {'hash': 'b49e60e084', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.9_612},
{'mask': {'hash': 'a811e775fd', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.9_599},
{'mask': {'hash': 'a6a8ebcf4b', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.9_552},
{'mask': {'hash': '9d8257e080', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.9_532},
{'mask': {'hash': '32de6454a8', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.9_516},
{'mask': {'hash': 'af3d4af2c8', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.9_499},
{'mask': {'hash': '3c6db475fb', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.9_483},
{'mask': {'hash': 'c290813fb9', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.9_464},
{'mask': {'hash': 'b6f0b8f606', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.943},
{'mask': {'hash': '92ce16bfdf', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.943},
{'mask': {'hash': 'c749b25868', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.9_408},
{'mask': {'hash': 'efb6cab859', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.9_335},
{'mask': {'hash': '1ff2eafb30', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.9_326},
{'mask': {'hash': '788b798e24', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.9_262},
{'mask': {'hash': 'abea804f0e', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.8_999},
{'mask': {'hash': '7b9e8ddb73', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.8_986},
{'mask': {'hash': 'cd24047c8a', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.8_984},
{'mask': {'hash': '6943e6bcbd', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.8_873},
{'mask': {'hash': 'b5f47c9191', 'shape': (4_8_0, 6_4_0)}, 'scores': 0.8_871}
] , )
# fmt: on
@require_torch
@slow
def UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = 'facebook/sam-vit-huge'
SCREAMING_SNAKE_CASE = pipeline('mask-generation' , model=snake_case__ )
SCREAMING_SNAKE_CASE = image_segmenter(
'http://images.cocodataset.org/val2017/000000039769.jpg' , pred_iou_thresh=1 , points_per_batch=2_5_6 )
# Shortening by hashing
SCREAMING_SNAKE_CASE = []
for i, o in enumerate(outputs['masks'] ):
new_outupt += [{"mask": mask_to_test_readable(snake_case__ ), "scores": outputs["scores"][i]}]
self.assertEqual(
nested_simplify(snake_case__ , decimals=4 ) , [
{'mask': {'hash': '115ad19f5f', 'shape': (4_8_0, 6_4_0)}, 'scores': 1.0_444},
{'mask': {'hash': '6affa964c6', 'shape': (4_8_0, 6_4_0)}, 'scores': 1.0_210},
{'mask': {'hash': 'dfe28a0388', 'shape': (4_8_0, 6_4_0)}, 'scores': 1.0_167},
{'mask': {'hash': 'c0a5f4a318', 'shape': (4_8_0, 6_4_0)}, 'scores': 1.0_132},
{'mask': {'hash': 'fe8065c197', 'shape': (4_8_0, 6_4_0)}, 'scores': 1.0_053},
] , )
| 673 |
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
a_ : Tuple = logging.get_logger(__name__) # pylint: disable=invalid-name
a_ : str = "\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline\n >>> from diffusers.utils import load_image\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16\n ... )\n >>> pipe_prior.to(\"cuda\")\n\n >>> prompt = \"A red cartoon frog, 4k\"\n >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)\n\n >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(\n ... \"kandinsky-community/kandinsky-2-2-decoder\", torch_dtype=torch.float16\n ... )\n >>> pipe.to(\"cuda\")\n\n >>> init_image = load_image(\n ... \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"\n ... \"/kandinsky/frog.png\"\n ... )\n\n >>> image = pipe(\n ... image=init_image,\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... strength=0.2,\n ... ).images\n\n >>> image[0].save(\"red_frog.png\")\n ```\n"
def __lowerCAmelCase ( _UpperCamelCase : List[str] , _UpperCamelCase : Any , _UpperCamelCase : Any=8 ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
SCREAMING_SNAKE_CASE = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
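# e.g. downscale_height_and_width(768, 768, 8) yields (96, 96) (the latent grid);
# any remainder rounds up, so a height of 769 maps to 13 * 8 = 104.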
def __lowerCAmelCase ( _UpperCamelCase : Optional[int] , _UpperCamelCase : List[Any]=5_12 , _UpperCamelCase : Union[str, Any]=5_12 ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = pil_image.resize((w, h) , resample=Image.BICUBIC , reducing_gap=1 )
SCREAMING_SNAKE_CASE = np.array(pil_image.convert('RGB' ) )
SCREAMING_SNAKE_CASE = arr.astype(np.floataa ) / 1_27.5 - 1
SCREAMING_SNAKE_CASE = np.transpose(_UpperCamelCase , [2, 0, 1] )
SCREAMING_SNAKE_CASE = torch.from_numpy(_UpperCamelCase ).unsqueeze(0 )
return image
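# e.g. a 512x512 RGB PIL image becomes a float tensor of shape (1, 3, 512, 512)
# with values scaled to [-1, 1].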
class UpperCamelCase ( SCREAMING_SNAKE_CASE ):
def __init__( self : int , snake_case__ : UNetaDConditionModel , snake_case__ : DDPMScheduler , snake_case__ : VQModel , ):
"""simple docstring"""
super().__init__()
self.register_modules(
unet=snake_case__ , scheduler=snake_case__ , movq=snake_case__ , )
SCREAMING_SNAKE_CASE = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def UpperCamelCase ( self : Any , snake_case__ : int , snake_case__ : List[Any] , snake_case__ : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = min(int(num_inference_steps * strength ) , snake_case__ )
SCREAMING_SNAKE_CASE = max(num_inference_steps - init_timestep , 0 )
SCREAMING_SNAKE_CASE = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
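    # e.g. with num_inference_steps=100 and strength=0.3 (the default), init_timestep=30
    # and t_start=70, so only the last 30 scheduler timesteps are actually run.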
def UpperCamelCase ( self : List[str] , snake_case__ : Dict , snake_case__ : int , snake_case__ : Optional[int] , snake_case__ : Optional[Any] , snake_case__ : Union[str, Any] , snake_case__ : Optional[int] , snake_case__ : str=None ):
"""simple docstring"""
if not isinstance(snake_case__ , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
F"""`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(snake_case__ )}""" )
SCREAMING_SNAKE_CASE = image.to(device=snake_case__ , dtype=snake_case__ )
SCREAMING_SNAKE_CASE = batch_size * num_images_per_prompt
if image.shape[1] == 4:
SCREAMING_SNAKE_CASE = image
else:
if isinstance(snake_case__ , snake_case__ ) and len(snake_case__ ) != batch_size:
raise ValueError(
F"""You have passed a list of generators of length {len(snake_case__ )}, but requested an effective batch"""
F""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
elif isinstance(snake_case__ , snake_case__ ):
SCREAMING_SNAKE_CASE = [
self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(snake_case__ )
]
SCREAMING_SNAKE_CASE = torch.cat(snake_case__ , dim=0 )
else:
SCREAMING_SNAKE_CASE = self.movq.encode(snake_case__ ).latent_dist.sample(snake_case__ )
SCREAMING_SNAKE_CASE = self.movq.config.scaling_factor * init_latents
SCREAMING_SNAKE_CASE = torch.cat([init_latents] , dim=0 )
SCREAMING_SNAKE_CASE = init_latents.shape
SCREAMING_SNAKE_CASE = randn_tensor(snake_case__ , generator=snake_case__ , device=snake_case__ , dtype=snake_case__ )
# get latents
SCREAMING_SNAKE_CASE = self.scheduler.add_noise(snake_case__ , snake_case__ , snake_case__ )
SCREAMING_SNAKE_CASE = init_latents
return latents
def UpperCamelCase ( self : int , snake_case__ : List[str]=0 ):
"""simple docstring"""
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`' )
SCREAMING_SNAKE_CASE = torch.device(F"""cuda:{gpu_id}""" )
SCREAMING_SNAKE_CASE = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(snake_case__ , snake_case__ )
def UpperCamelCase ( self : Union[str, Any] , snake_case__ : Optional[int]=0 ):
"""simple docstring"""
if is_accelerate_available() and is_accelerate_version('>=' , '0.17.0.dev0' ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.' )
SCREAMING_SNAKE_CASE = torch.device(F"""cuda:{gpu_id}""" )
if self.device.type != "cpu":
self.to('cpu' , silence_dtype_warnings=snake_case__ )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
SCREAMING_SNAKE_CASE = None
for cpu_offloaded_model in [self.unet, self.movq]:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = cpu_offload_with_hook(snake_case__ , snake_case__ , prev_module_hook=snake_case__ )
# We'll offload the last model manually.
SCREAMING_SNAKE_CASE = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def UpperCamelCase ( self : Dict ):
"""simple docstring"""
if not hasattr(self.unet , '_hf_hook' ):
return self.device
for module in self.unet.modules():
if (
hasattr(snake_case__ , '_hf_hook' )
and hasattr(module._hf_hook , 'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(snake_case__ )
def __call__( self : str , snake_case__ : Union[torch.FloatTensor, List[torch.FloatTensor]] , snake_case__ : Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]] , snake_case__ : Union[torch.FloatTensor, List[torch.FloatTensor]] , snake_case__ : int = 5_1_2 , snake_case__ : int = 5_1_2 , snake_case__ : int = 1_0_0 , snake_case__ : float = 4.0 , snake_case__ : float = 0.3 , snake_case__ : int = 1 , snake_case__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , snake_case__ : Optional[str] = "pil" , snake_case__ : bool = True , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self._execution_device
SCREAMING_SNAKE_CASE = guidance_scale > 1.0
if isinstance(snake_case__ , snake_case__ ):
SCREAMING_SNAKE_CASE = torch.cat(snake_case__ , dim=0 )
SCREAMING_SNAKE_CASE = image_embeds.shape[0]
if isinstance(snake_case__ , snake_case__ ):
SCREAMING_SNAKE_CASE = torch.cat(snake_case__ , dim=0 )
if do_classifier_free_guidance:
SCREAMING_SNAKE_CASE = image_embeds.repeat_interleave(snake_case__ , dim=0 )
SCREAMING_SNAKE_CASE = negative_image_embeds.repeat_interleave(snake_case__ , dim=0 )
SCREAMING_SNAKE_CASE = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=snake_case__ )
if not isinstance(snake_case__ , snake_case__ ):
SCREAMING_SNAKE_CASE = [image]
if not all(isinstance(snake_case__ , (PIL.Image.Image, torch.Tensor) ) for i in image ):
raise ValueError(
F"""Input is in incorrect format: {[type(snake_case__ ) for i in image]}. Currently, we only support PIL image and pytorch tensor""" )
SCREAMING_SNAKE_CASE = torch.cat([prepare_image(snake_case__ , snake_case__ , snake_case__ ) for i in image] , dim=0 )
SCREAMING_SNAKE_CASE = image.to(dtype=image_embeds.dtype , device=snake_case__ )
SCREAMING_SNAKE_CASE = self.movq.encode(snake_case__ )['latents']
SCREAMING_SNAKE_CASE = latents.repeat_interleave(snake_case__ , dim=0 )
self.scheduler.set_timesteps(snake_case__ , device=snake_case__ )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.get_timesteps(snake_case__ , snake_case__ , snake_case__ )
SCREAMING_SNAKE_CASE = timesteps[:1].repeat(batch_size * num_images_per_prompt )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = downscale_height_and_width(snake_case__ , snake_case__ , self.movq_scale_factor )
SCREAMING_SNAKE_CASE = self.prepare_latents(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , image_embeds.dtype , snake_case__ , snake_case__ )
for i, t in enumerate(self.progress_bar(snake_case__ ) ):
# expand the latents if we are doing classifier free guidance
SCREAMING_SNAKE_CASE = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
SCREAMING_SNAKE_CASE = {'image_embeds': image_embeds}
            SCREAMING_SNAKE_CASE = self.unet(
                sample=snake_case__ , timestep=t , encoder_hidden_states=snake_case__ , added_cond_kwargs=snake_case__ , return_dict=snake_case__ , )[0]
if do_classifier_free_guidance:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = noise_pred.split(latents.shape[1] , dim=1 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = noise_pred.chunk(2 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = variance_pred.chunk(2 )
SCREAMING_SNAKE_CASE = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
SCREAMING_SNAKE_CASE = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , 'variance_type' )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
            SCREAMING_SNAKE_CASE = self.scheduler.step(
                snake_case__ , t , snake_case__ , generator=snake_case__ , )[0]
# post-processing
SCREAMING_SNAKE_CASE = self.movq.decode(snake_case__ , force_not_quantize=snake_case__ )['sample']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" )
if output_type in ["np", "pil"]:
SCREAMING_SNAKE_CASE = image * 0.5 + 0.5
SCREAMING_SNAKE_CASE = image.clamp(0 , 1 )
SCREAMING_SNAKE_CASE = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
SCREAMING_SNAKE_CASE = self.numpy_to_pil(snake_case__ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=snake_case__ )
| 673 | 1 |
def __lowerCAmelCase ( _UpperCamelCase : Optional[int] , _UpperCamelCase : Any , _UpperCamelCase : Tuple , _UpperCamelCase : List[Any] , _UpperCamelCase : Any , _UpperCamelCase : Union[str, Any] ) -> Any:
'''simple docstring'''
if index == r:
for j in range(_UpperCamelCase ):
print(data[j] , end=' ' )
print(' ' )
return
# When no more elements are there to put in data[]
if i >= n:
return
# current is included, put next at next location
SCREAMING_SNAKE_CASE = arr[i]
combination_util(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , index + 1 , _UpperCamelCase , i + 1 )
# current is excluded, replace it with
# next (Note that i+1 is passed, but
# index is not changed)
combination_util(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , i + 1 )
# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combinationUtil()
def __lowerCAmelCase ( _UpperCamelCase : str , _UpperCamelCase : List[Any] , _UpperCamelCase : List[str] ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = [0] * r
# Print all combination using temporary array 'data[]'
combination_util(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , 0 , _UpperCamelCase , 0 )
if __name__ == "__main__":
# Driver code to check the function above
a_ : Union[str, Any] = [10, 20, 30, 40, 50]
print_combination(arr, len(arr), 3)
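    # Expected output, one combination per line:
    # 10 20 30 / 10 20 40 / 10 20 50 / 10 30 40 / 10 30 50
    # 10 40 50 / 20 30 40 / 20 30 50 / 20 40 50 / 30 40 50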
# This code is contributed by Ambuj sahu
| 673 |
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
a_ : List[Any] = logging.get_logger("transformers.models.speecht5")
def __lowerCAmelCase ( _UpperCamelCase : Tuple , _UpperCamelCase : Tuple , _UpperCamelCase : Any ) -> Dict:
'''simple docstring'''
hf_model.apply_weight_norm()
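    # weight norm must be active so the checkpoint's weight_g/weight_v tensors map
    # onto the parametrized parameters; it is removed again once loading is done.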
SCREAMING_SNAKE_CASE = checkpoint['input_conv.weight_g']
SCREAMING_SNAKE_CASE = checkpoint['input_conv.weight_v']
SCREAMING_SNAKE_CASE = checkpoint['input_conv.bias']
for i in range(len(config.upsample_rates ) ):
SCREAMING_SNAKE_CASE = checkpoint[f"""upsamples.{i}.1.weight_g"""]
SCREAMING_SNAKE_CASE = checkpoint[f"""upsamples.{i}.1.weight_v"""]
SCREAMING_SNAKE_CASE = checkpoint[f"""upsamples.{i}.1.bias"""]
for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
for j in range(len(config.resblock_dilation_sizes ) ):
SCREAMING_SNAKE_CASE = checkpoint[f"""blocks.{i}.convs1.{j}.1.weight_g"""]
SCREAMING_SNAKE_CASE = checkpoint[f"""blocks.{i}.convs1.{j}.1.weight_v"""]
SCREAMING_SNAKE_CASE = checkpoint[f"""blocks.{i}.convs1.{j}.1.bias"""]
SCREAMING_SNAKE_CASE = checkpoint[f"""blocks.{i}.convs2.{j}.1.weight_g"""]
SCREAMING_SNAKE_CASE = checkpoint[f"""blocks.{i}.convs2.{j}.1.weight_v"""]
SCREAMING_SNAKE_CASE = checkpoint[f"""blocks.{i}.convs2.{j}.1.bias"""]
SCREAMING_SNAKE_CASE = checkpoint['output_conv.1.weight_g']
SCREAMING_SNAKE_CASE = checkpoint['output_conv.1.weight_v']
SCREAMING_SNAKE_CASE = checkpoint['output_conv.1.bias']
hf_model.remove_weight_norm()
@torch.no_grad()
def __lowerCAmelCase ( _UpperCamelCase : Optional[int] , _UpperCamelCase : Dict , _UpperCamelCase : Tuple , _UpperCamelCase : Any=None , _UpperCamelCase : List[str]=None , ) -> Tuple:
'''simple docstring'''
if config_path is not None:
SCREAMING_SNAKE_CASE = SpeechTaHifiGanConfig.from_pretrained(_UpperCamelCase )
else:
SCREAMING_SNAKE_CASE = SpeechTaHifiGanConfig()
SCREAMING_SNAKE_CASE = SpeechTaHifiGan(_UpperCamelCase )
SCREAMING_SNAKE_CASE = torch.load(_UpperCamelCase )
load_weights(orig_checkpoint['model']['generator'] , _UpperCamelCase , _UpperCamelCase )
SCREAMING_SNAKE_CASE = np.load(_UpperCamelCase )
SCREAMING_SNAKE_CASE = stats[0].reshape(-1 )
SCREAMING_SNAKE_CASE = stats[1].reshape(-1 )
SCREAMING_SNAKE_CASE = torch.from_numpy(_UpperCamelCase ).float()
SCREAMING_SNAKE_CASE = torch.from_numpy(_UpperCamelCase ).float()
model.save_pretrained(_UpperCamelCase )
if repo_id:
print('Pushing to the hub...' )
model.push_to_hub(_UpperCamelCase )
if __name__ == "__main__":
a_ : Dict = argparse.ArgumentParser()
parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
parser.add_argument("--stats_path", required=True, default=None, type=str, help="Path to stats.npy file")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
a_ : Tuple = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
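# Example invocation (file names below are placeholders, not real paths):
#   python <this_script>.py --checkpoint_path generator.ckpt --stats_path stats.npy \
#       --pytorch_dump_folder_path ./hifigan_out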
| 673 | 1 |
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
a_ : Tuple = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
"text-classification",
"language-modeling",
"summarization",
"token-classification",
"question-answering",
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
import run_ta_mlm_flax
logging.basicConfig(level=logging.DEBUG)
a_ : List[str] = logging.getLogger()
def __lowerCAmelCase ( ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
parser.add_argument('-f' )
SCREAMING_SNAKE_CASE = parser.parse_args()
return args.f
def __lowerCAmelCase ( _UpperCamelCase : int , _UpperCamelCase : Dict="eval" ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = os.path.join(_UpperCamelCase , f"""{split}_results.json""" )
if os.path.exists(_UpperCamelCase ):
with open(_UpperCamelCase , 'r' ) as f:
return json.load(_UpperCamelCase )
raise ValueError(f"""can't find {path}""" )
a_ : Dict = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class UpperCamelCase ( SCREAMING_SNAKE_CASE ):
def UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE = F"""
run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--eval_steps=2
--warmup_steps=2
--seed=42
--max_seq_length=128
""".split()
        with patch.object(sys , 'argv' , snake_case__ ):
run_flax_glue.main()
SCREAMING_SNAKE_CASE = get_results(snake_case__ )
self.assertGreaterEqual(result['eval_accuracy'] , 0.75 )
@slow
def UpperCamelCase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE = F"""
run_clm_flax.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--block_size 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
""".split()
        with patch.object(sys , 'argv' , snake_case__ ):
run_clm_flax.main()
SCREAMING_SNAKE_CASE = get_results(snake_case__ )
self.assertLess(result['eval_perplexity'] , 1_0_0 )
@slow
def UpperCamelCase ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE = F"""
run_summarization.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--test_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=8
--do_train
--do_eval
--do_predict
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--predict_with_generate
""".split()
        with patch.object(sys , 'argv' , snake_case__ ):
run_summarization_flax.main()
SCREAMING_SNAKE_CASE = get_results(snake_case__ , split='test' )
self.assertGreaterEqual(result['test_rouge1'] , 1_0 )
self.assertGreaterEqual(result['test_rouge2'] , 2 )
self.assertGreaterEqual(result['test_rougeL'] , 7 )
self.assertGreaterEqual(result['test_rougeLsum'] , 7 )
@slow
def UpperCamelCase ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE = F"""
run_mlm.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--overwrite_output_dir
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--logging_steps 2 --eval_steps 2
--do_train
--do_eval
--num_train_epochs=1
""".split()
        with patch.object(sys , 'argv' , snake_case__ ):
run_mlm_flax.main()
SCREAMING_SNAKE_CASE = get_results(snake_case__ )
self.assertLess(result['eval_perplexity'] , 4_2 )
@slow
def UpperCamelCase ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE = F"""
run_t5_mlm_flax.py
--model_name_or_path t5-small
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
""".split()
        with patch.object(sys , 'argv' , snake_case__ ):
run_ta_mlm_flax.main()
SCREAMING_SNAKE_CASE = get_results(snake_case__ )
self.assertGreaterEqual(result['eval_accuracy'] , 0.42 )
@slow
def UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = 7 if get_gpu_count() > 1 else 2
SCREAMING_SNAKE_CASE = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE = F"""
run_flax_ner.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--do_train
--do_eval
--warmup_steps=2
--learning_rate=2e-4
--logging_steps 2 --eval_steps 2
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
""".split()
        with patch.object(sys , 'argv' , snake_case__ ):
run_flax_ner.main()
SCREAMING_SNAKE_CASE = get_results(snake_case__ )
self.assertGreaterEqual(result['eval_accuracy'] , 0.75 )
self.assertGreaterEqual(result['eval_f1'] , 0.3 )
@slow
def UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE = F"""
run_qa.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=2
--do_train
--do_eval
--logging_steps 2 --eval_steps 2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
""".split()
        with patch.object(sys , 'argv' , snake_case__ ):
run_qa.main()
SCREAMING_SNAKE_CASE = get_results(snake_case__ )
self.assertGreaterEqual(result['eval_f1'] , 3_0 )
self.assertGreaterEqual(result['eval_exact'] , 3_0 )
| 673 |
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
a_ : Optional[int] = logging.get_logger(__name__)
a_ : Union[str, Any] = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
a_ : Any = {
"vocab_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
},
"merges_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
},
"tokenizer_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
},
}
a_ : List[Any] = {
"allenai/led-base-16384": 1_6384,
}
class UpperCamelCase ( SCREAMING_SNAKE_CASE ):
__UpperCamelCase =VOCAB_FILES_NAMES
__UpperCamelCase =PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase =LEDTokenizer
__UpperCamelCase =["input_ids", "attention_mask"]
def __init__( self : Tuple , snake_case__ : List[Any]=None , snake_case__ : List[str]=None , snake_case__ : List[str]=None , snake_case__ : Dict="replace" , snake_case__ : Tuple="<s>" , snake_case__ : Optional[Any]="</s>" , snake_case__ : int="</s>" , snake_case__ : Dict="<s>" , snake_case__ : Union[str, Any]="<unk>" , snake_case__ : Optional[int]="<pad>" , snake_case__ : List[str]="<mask>" , snake_case__ : List[Any]=False , snake_case__ : int=True , **snake_case__ : Dict , ):
"""simple docstring"""
super().__init__(
snake_case__ , snake_case__ , tokenizer_file=snake_case__ , errors=snake_case__ , bos_token=snake_case__ , eos_token=snake_case__ , sep_token=snake_case__ , cls_token=snake_case__ , unk_token=snake_case__ , pad_token=snake_case__ , mask_token=snake_case__ , add_prefix_space=snake_case__ , trim_offsets=snake_case__ , **snake_case__ , )
SCREAMING_SNAKE_CASE = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space' , snake_case__ ) != add_prefix_space:
SCREAMING_SNAKE_CASE = getattr(pre_tokenizers , pre_tok_state.pop('type' ) )
SCREAMING_SNAKE_CASE = add_prefix_space
SCREAMING_SNAKE_CASE = pre_tok_class(**snake_case__ )
SCREAMING_SNAKE_CASE = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
SCREAMING_SNAKE_CASE = 'post_processor'
SCREAMING_SNAKE_CASE = getattr(self.backend_tokenizer , snake_case__ , snake_case__ )
if tokenizer_component_instance:
SCREAMING_SNAKE_CASE = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
SCREAMING_SNAKE_CASE = tuple(state['sep'] )
if "cls" in state:
SCREAMING_SNAKE_CASE = tuple(state['cls'] )
SCREAMING_SNAKE_CASE = False
if state.get('add_prefix_space' , snake_case__ ) != add_prefix_space:
SCREAMING_SNAKE_CASE = add_prefix_space
SCREAMING_SNAKE_CASE = True
if state.get('trim_offsets' , snake_case__ ) != trim_offsets:
SCREAMING_SNAKE_CASE = trim_offsets
SCREAMING_SNAKE_CASE = True
if changes_to_apply:
SCREAMING_SNAKE_CASE = getattr(processors , state.pop('type' ) )
SCREAMING_SNAKE_CASE = component_class(**snake_case__ )
setattr(self.backend_tokenizer , snake_case__ , snake_case__ )
@property
# Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
if self._mask_token is None:
if self.verbose:
logger.error('Using mask_token, but it is not set yet.' )
return None
return str(self._mask_token )
@mask_token.setter
def UpperCamelCase ( self : List[Any] , snake_case__ : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else value
SCREAMING_SNAKE_CASE = value
def UpperCamelCase ( self : Dict , *snake_case__ : Optional[Any] , **snake_case__ : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = kwargs.get('is_split_into_words' , snake_case__ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
'to use it with pretokenized inputs.' )
return super()._batch_encode_plus(*snake_case__ , **snake_case__ )
def UpperCamelCase ( self : List[str] , *snake_case__ : List[Any] , **snake_case__ : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = kwargs.get('is_split_into_words' , snake_case__ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
'to use it with pretokenized inputs.' )
return super()._encode_plus(*snake_case__ , **snake_case__ )
def UpperCamelCase ( self : Union[str, Any] , snake_case__ : str , snake_case__ : Optional[str] = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self._tokenizer.model.save(snake_case__ , name=snake_case__ )
return tuple(snake_case__ )
def UpperCamelCase ( self : List[str] , snake_case__ : int , snake_case__ : Tuple=None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def UpperCamelCase ( self : Optional[int] , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = [self.sep_token_id]
SCREAMING_SNAKE_CASE = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
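# Layout sketch for the two methods above (LED uses BART-style special tokens,
# with bos=cls="<s>" and eos=sep="</s>"):
#   single sequence: <s> A </s>              -> token_type_ids all 0
#   sequence pair:   <s> A </s> </s> B </s>  -> token_type_ids still all 0,
# since LED, like BART, does not use segment embeddings.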
def UpperCamelCase ( self : Optional[Any] , snake_case__ : Union[Dict[str, EncodedInput], BatchEncoding] , snake_case__ : Optional[int] = None , snake_case__ : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , snake_case__ : Optional[int] = None , snake_case__ : Optional[bool] = None , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = super()._pad(
encoded_inputs=snake_case__ , max_length=snake_case__ , padding_strategy=snake_case__ , pad_to_multiple_of=snake_case__ , return_attention_mask=snake_case__ , )
# Load from model defaults
if return_attention_mask is None:
SCREAMING_SNAKE_CASE = 'attention_mask' in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
SCREAMING_SNAKE_CASE = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
SCREAMING_SNAKE_CASE = len(encoded_inputs['global_attention_mask'] ) != len(snake_case__ )
if needs_to_be_padded:
SCREAMING_SNAKE_CASE = len(snake_case__ ) - len(encoded_inputs['global_attention_mask'] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
SCREAMING_SNAKE_CASE = (
encoded_inputs['global_attention_mask'] + [-1] * difference
)
elif self.padding_side == "left":
SCREAMING_SNAKE_CASE = [-1] * difference + encoded_inputs[
'global_attention_mask'
]
else:
raise ValueError('Invalid padding strategy:' + str(self.padding_side ) )
return encoded_inputs
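# A self-contained sketch of the padding rule implemented in _pad above: the
# global attention mask is padded with -1 rather than 0, because 0 already means
# "local attention" for LED, while -1 marks positions that are not attended at all.
def pad_global_attention_mask(mask, target_len, padding_side="right"):
    difference = target_len - len(mask)
    if padding_side == "right":
        return mask + [-1] * difference
    if padding_side == "left":
        return [-1] * difference + mask
    raise ValueError("Invalid padding strategy: " + str(padding_side))

assert pad_global_attention_mask([1, 0, 0], 5) == [1, 0, 0, -1, -1]
assert pad_global_attention_mask([1, 0], 4, "left") == [-1, -1, 1, 0]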
| 673 | 1 |
import argparse
import logging
import os
import re
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorForLanguageModeling,
PushToHubCallback,
TFAutoModelForMaskedLM,
create_optimizer,
)
a_ : Optional[int] = logging.getLogger(__name__)
a_ : Any = tf.data.AUTOTUNE
def __lowerCAmelCase ( ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE = argparse.ArgumentParser(description='Train a masked language model on TPU.' )
parser.add_argument(
'--pretrained_model_config' , type=_UpperCamelCase , default='roberta-base' , help='The model config to use. Note that we don\'t copy the model\'s weights, only the config!' , )
parser.add_argument(
'--tokenizer' , type=_UpperCamelCase , default='unigram-tokenizer-wikitext' , help='The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model\'s vocab size.' , )
parser.add_argument(
'--per_replica_batch_size' , type=_UpperCamelCase , default=8 , help='Batch size per TPU core.' , )
parser.add_argument(
'--no_tpu' , action='store_true' , help='If set, run on CPU and don\'t try to initialize a TPU. Useful for debugging on non-TPU instances.' , )
parser.add_argument(
'--tpu_name' , type=_UpperCamelCase , help='Name of TPU resource to initialize. Should be blank on Colab, and \'local\' on TPU VMs.' , default='local' , )
parser.add_argument(
'--tpu_zone' , type=_UpperCamelCase , help='Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.' , )
parser.add_argument(
'--gcp_project' , type=_UpperCamelCase , help='Google cloud project name. Only used for non-Colab TPU nodes.' )
parser.add_argument(
'--bfloat16' , action='store_true' , help='Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.' , )
parser.add_argument(
'--train_dataset' , type=_UpperCamelCase , help='Path to training dataset to load. If the path begins with `gs://`'
' then the dataset will be loaded from a Google Cloud Storage bucket.' , )
parser.add_argument(
'--shuffle_buffer_size' , type=_UpperCamelCase , default=2**18 , help='Size of the shuffle buffer (in samples)' , )
parser.add_argument(
'--eval_dataset' , type=_UpperCamelCase , help='Path to evaluation dataset to load. If the path begins with `gs://`'
' then the dataset will be loaded from a Google Cloud Storage bucket.' , )
parser.add_argument(
'--num_epochs' , type=_UpperCamelCase , default=1 , help='Number of epochs to train for.' , )
parser.add_argument(
'--learning_rate' , type=_UpperCamelCase , default=1e-4 , help='Learning rate to use for training.' , )
parser.add_argument(
'--weight_decay_rate' , type=_UpperCamelCase , default=1e-3 , help='Weight decay rate to use for training.' , )
parser.add_argument(
'--max_length' , type=_UpperCamelCase , default=5_12 , help='Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py' , )
parser.add_argument(
'--mlm_probability' , type=_UpperCamelCase , default=0.15 , help='Fraction of tokens to mask during training.' , )
parser.add_argument('--output_dir' , type=_UpperCamelCase , required=_UpperCamelCase , help='Path to save model checkpoints to.' )
parser.add_argument('--hub_model_id' , type=_UpperCamelCase , help='Model ID to upload to on the Hugging Face Hub.' )
SCREAMING_SNAKE_CASE = parser.parse_args()
return args
def __lowerCAmelCase ( _UpperCamelCase : str ) -> int:
'''simple docstring'''
try:
if args.tpu_name:
SCREAMING_SNAKE_CASE = tf.distribute.cluster_resolver.TPUClusterResolver(
args.tpu_name , zone=args.tpu_zone , project=args.gcp_project )
else:
SCREAMING_SNAKE_CASE = tf.distribute.cluster_resolver.TPUClusterResolver()
except ValueError:
raise RuntimeError(
'Couldn\'t connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or '
'--gcp_project. When running on a TPU VM, use --tpu_name local.' )
tf.config.experimental_connect_to_cluster(_UpperCamelCase )
tf.tpu.experimental.initialize_tpu_system(_UpperCamelCase )
return tpu
def __lowerCAmelCase ( _UpperCamelCase : Any ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = 0
for file in file_list:
SCREAMING_SNAKE_CASE = file.split('/' )[-1]
SCREAMING_SNAKE_CASE = re.search(R'-\d+-(\d+)\.tfrecord' , _UpperCamelCase ).group(1 )
SCREAMING_SNAKE_CASE = int(_UpperCamelCase )
num_samples += sample_count
return num_samples
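# Shard-name convention assumed by count_samples above: the per-shard sample count
# is the last number in "<prefix>-<shard>-<count>.tfrecord". A quick check against
# a hypothetical file name:
assert re.search(r"-\d+-(\d+)\.tfrecord", "wiki-00001-52000.tfrecord").group(1) == "52000"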
def __lowerCAmelCase ( _UpperCamelCase : Dict , _UpperCamelCase : str , _UpperCamelCase : Any , _UpperCamelCase : Optional[int] , _UpperCamelCase : str , _UpperCamelCase : str=None ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE = count_samples(_UpperCamelCase )
SCREAMING_SNAKE_CASE = tf.data.Dataset.from_tensor_slices(_UpperCamelCase )
if shuffle:
SCREAMING_SNAKE_CASE = dataset.shuffle(len(_UpperCamelCase ) )
SCREAMING_SNAKE_CASE = tf.data.TFRecordDataset(_UpperCamelCase , num_parallel_reads=_UpperCamelCase )
# TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
SCREAMING_SNAKE_CASE = dataset.apply(tf.data.experimental.assert_cardinality(_UpperCamelCase ) )
SCREAMING_SNAKE_CASE = dataset.map(_UpperCamelCase , num_parallel_calls=_UpperCamelCase )
if shuffle:
assert shuffle_buffer_size is not None
SCREAMING_SNAKE_CASE = dataset.shuffle(args.shuffle_buffer_size )
SCREAMING_SNAKE_CASE = dataset.batch(_UpperCamelCase , drop_remainder=_UpperCamelCase )
SCREAMING_SNAKE_CASE = dataset.map(_UpperCamelCase , num_parallel_calls=_UpperCamelCase )
SCREAMING_SNAKE_CASE = dataset.prefetch(_UpperCamelCase )
return dataset
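# Ordering note for prepare_dataset above, as a condensed tf.data sketch
# (decode_fn and mask_fn are assumed to have the signatures used in main()):
#     ds = tf.data.TFRecordDataset(files, num_parallel_reads=AUTO)
#     ds = ds.map(decode_fn).shuffle(buffer_size).batch(batch_size, drop_remainder=True)
#     ds = ds.map(mask_fn).prefetch(AUTO)
# Masking runs per batch, after batching, so each pass over the data re-draws the
# masks (nothing is cached); prefetch comes last to overlap input work with compute.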
def __lowerCAmelCase ( _UpperCamelCase : Optional[int] ) -> int:
'''simple docstring'''
if not args.no_tpu:
SCREAMING_SNAKE_CASE = initialize_tpu(_UpperCamelCase )
SCREAMING_SNAKE_CASE = tf.distribute.TPUStrategy(_UpperCamelCase )
else:
SCREAMING_SNAKE_CASE = tf.distribute.OneDeviceStrategy(device='/gpu:0' )
if args.bfloat16:
tf.keras.mixed_precision.set_global_policy('mixed_bfloat16' )
SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(args.tokenizer )
SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(args.pretrained_model_config )
SCREAMING_SNAKE_CASE = tokenizer.vocab_size
SCREAMING_SNAKE_CASE = tf.io.gfile.glob(os.path.join(args.train_dataset , '*.tfrecord' ) )
if not training_records:
raise ValueError(f"""No .tfrecord files found in {args.train_dataset}.""" )
SCREAMING_SNAKE_CASE = tf.io.gfile.glob(os.path.join(args.eval_dataset , '*.tfrecord' ) )
if not eval_records:
raise ValueError(f"""No .tfrecord files found in {args.eval_dataset}.""" )
SCREAMING_SNAKE_CASE = count_samples(_UpperCamelCase )
SCREAMING_SNAKE_CASE = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
SCREAMING_SNAKE_CASE = steps_per_epoch * args.num_epochs
with strategy.scope():
SCREAMING_SNAKE_CASE = TFAutoModelForMaskedLM.from_config(_UpperCamelCase )
model(model.dummy_inputs ) # Pass some dummy inputs through the model to ensure all the weights are built
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = create_optimizer(
num_train_steps=_UpperCamelCase , num_warmup_steps=total_train_steps // 20 , init_lr=args.learning_rate , weight_decay_rate=args.weight_decay_rate , )
# Transformers models compute the right loss for their task by default when labels are passed, and will
# use this for training unless you specify your own loss function in compile().
model.compile(optimizer=_UpperCamelCase , metrics=['accuracy'] )
def decode_fn(_UpperCamelCase : Union[str, Any] ):
SCREAMING_SNAKE_CASE = {
'input_ids': tf.io.FixedLenFeature(dtype=tf.int64 , shape=(args.max_length,) ),
'attention_mask': tf.io.FixedLenFeature(dtype=tf.int64 , shape=(args.max_length,) ),
}
return tf.io.parse_single_example(_UpperCamelCase , _UpperCamelCase )
# Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
# use their methods in our data pipeline.
SCREAMING_SNAKE_CASE = DataCollatorForLanguageModeling(
tokenizer=_UpperCamelCase , mlm_probability=args.mlm_probability , mlm=_UpperCamelCase , return_tensors='tf' )
def mask_with_collator(_UpperCamelCase : Any ):
# TF really needs an isin() function
SCREAMING_SNAKE_CASE = (
~tf.cast(batch['attention_mask'] , tf.bool )
| (batch['input_ids'] == tokenizer.cls_token_id)
| (batch['input_ids'] == tokenizer.sep_token_id)
)
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = data_collator.tf_mask_tokens(
batch['input_ids'] , vocab_size=len(_UpperCamelCase ) , mask_token_id=tokenizer.mask_token_id , special_tokens_mask=_UpperCamelCase , )
return batch
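# The OR-chain above emulates a missing tf.isin(): a position is excluded from
# masking when it is padding, the CLS token, or the SEP token. An equivalent
# broadcasting sketch (special token ids collected into one tensor):
#     special = tf.constant([tokenizer.cls_token_id, tokenizer.sep_token_id])
#     special_tokens_mask = tf.reduce_any(
#         tf.equal(batch["input_ids"][..., None], special), axis=-1
#     ) | ~tf.cast(batch["attention_mask"], tf.bool)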
SCREAMING_SNAKE_CASE = args.per_replica_batch_size * strategy.num_replicas_in_sync
SCREAMING_SNAKE_CASE = prepare_dataset(
_UpperCamelCase , decode_fn=_UpperCamelCase , mask_fn=_UpperCamelCase , batch_size=_UpperCamelCase , shuffle=_UpperCamelCase , shuffle_buffer_size=args.shuffle_buffer_size , )
SCREAMING_SNAKE_CASE = prepare_dataset(
_UpperCamelCase , decode_fn=_UpperCamelCase , mask_fn=_UpperCamelCase , batch_size=_UpperCamelCase , shuffle=_UpperCamelCase , )
SCREAMING_SNAKE_CASE = []
if args.hub_model_id:
callbacks.append(
PushToHubCallback(output_dir=args.output_dir , hub_model_id=args.hub_model_id , tokenizer=_UpperCamelCase ) )
model.fit(
_UpperCamelCase , validation_data=_UpperCamelCase , epochs=args.num_epochs , callbacks=_UpperCamelCase , )
model.save_pretrained(args.output_dir )
if __name__ == "__main__":
a_ : Union[str, Any] = parse_args()
main(args)
| 673 |
#
# This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def __lowerCAmelCase ( *_UpperCamelCase : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
with open(__file__ , 'r' ) as fh:
fcntl.flock(_UpperCamelCase , fcntl.LOCK_EX )
try:
print(*_UpperCamelCase )
finally:
fcntl.flock(_UpperCamelCase , fcntl.LOCK_UN )
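# printflock serializes output from concurrent ranks: it takes an exclusive fcntl
# lock on this source file around print(), so lines emitted by different processes
# never interleave mid-line. It is called exactly like print(), e.g.
#     printflock(f"{gpu} is OK")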
a_ : int = int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(local_rank)
a_ : str = torch.device("cuda", local_rank)
a_ : Optional[int] = socket.gethostname()
a_ : Union[str, Any] = F"""[{hostname}-{local_rank}]"""
try:
# test distributed
dist.init_process_group("nccl")
dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
dist.barrier()
# test cuda is available and can allocate memory
torch.cuda.is_available()
torch.ones(1).cuda(local_rank)
# global rank
a_ : Dict = dist.get_rank()
a_ : Any = dist.get_world_size()
printflock(F"""{gpu} is OK (global rank: {rank}/{world_size})""")
dist.barrier()
if rank == 0:
printflock(F"""pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}""")
except Exception:
printflock(F"""{gpu} is broken""")
raise
| 673 | 1 |
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class UpperCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
__UpperCamelCase =1
@register_to_config
def __init__( self : int , snake_case__ : int = 1_0_0_0 , snake_case__ : Optional[Union[np.ndarray, List[float]]] = None ):
"""simple docstring"""
self.set_timesteps(snake_case__ )
# standard deviation of the initial noise distribution
SCREAMING_SNAKE_CASE = 1.0
# For now we only support F-PNDM, i.e. the runge-kutta method
# For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
# mainly at formula (9), (12), (13) and the Algorithm 2.
SCREAMING_SNAKE_CASE = 4
# running values
SCREAMING_SNAKE_CASE = []
def UpperCamelCase ( self : int , snake_case__ : int , snake_case__ : Union[str, torch.device] = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = num_inference_steps
SCREAMING_SNAKE_CASE = torch.linspace(1 , 0 , num_inference_steps + 1 )[:-1]
SCREAMING_SNAKE_CASE = torch.cat([steps, torch.tensor([0.0] )] )
if self.config.trained_betas is not None:
SCREAMING_SNAKE_CASE = torch.tensor(self.config.trained_betas , dtype=torch.floataa )
else:
SCREAMING_SNAKE_CASE = torch.sin(steps * math.pi / 2 ) ** 2
SCREAMING_SNAKE_CASE = (1.0 - self.betas**2) ** 0.5
SCREAMING_SNAKE_CASE = (torch.atan2(self.betas , self.alphas ) / math.pi * 2)[:-1]
SCREAMING_SNAKE_CASE = timesteps.to(snake_case__ )
SCREAMING_SNAKE_CASE = []
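# Note on the schedule built above: betas**2 + alphas**2 == 1 by construction, so
# each (alphas[i], betas[i]) pair lies on the unit circle and
# atan2(betas, alphas) / (pi / 2) maps it back to a monotonically decreasing
# timestep in (0, 1].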
def UpperCamelCase ( self : List[str] , snake_case__ : torch.FloatTensor , snake_case__ : int , snake_case__ : torch.FloatTensor , snake_case__ : bool = True , ):
"""simple docstring"""
if self.num_inference_steps is None:
raise ValueError(
'Number of inference steps is \'None\', you need to run \'set_timesteps\' after creating the scheduler' )
SCREAMING_SNAKE_CASE = (self.timesteps == timestep).nonzero().item()
SCREAMING_SNAKE_CASE = timestep_index + 1
SCREAMING_SNAKE_CASE = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
self.ets.append(snake_case__ )
if len(self.ets ) == 1:
SCREAMING_SNAKE_CASE = self.ets[-1]
elif len(self.ets ) == 2:
SCREAMING_SNAKE_CASE = (3 * self.ets[-1] - self.ets[-2]) / 2
elif len(self.ets ) == 3:
SCREAMING_SNAKE_CASE = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
else:
SCREAMING_SNAKE_CASE = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])
SCREAMING_SNAKE_CASE = self._get_prev_sample(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=snake_case__ )
def UpperCamelCase ( self : int , snake_case__ : torch.FloatTensor , *snake_case__ : List[Any] , **snake_case__ : int ):
"""simple docstring"""
return sample
def UpperCamelCase ( self : Dict , snake_case__ : str , snake_case__ : str , snake_case__ : List[str] , snake_case__ : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.alphas[timestep_index]
SCREAMING_SNAKE_CASE = self.betas[timestep_index]
SCREAMING_SNAKE_CASE = self.alphas[prev_timestep_index]
SCREAMING_SNAKE_CASE = self.betas[prev_timestep_index]
SCREAMING_SNAKE_CASE = (sample - sigma * ets) / max(snake_case__ , 1E-8 )
SCREAMING_SNAKE_CASE = next_alpha * pred + ets * next_sigma
return prev_sample
def __len__( self : Any ):
"""simple docstring"""
return self.config.num_train_timesteps
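# The branch ladder in step() above is a warm-started linear multistep scheme:
# with fewer than four stored derivative estimates it falls back to lower-order
# rules, and at steady state it applies the 4th-order Adams-Bashforth
# coefficients. A standalone sketch of just that update:
def multistep_update(ets):
    # ets holds the most recent derivative estimates, newest last (assumed non-empty)
    if len(ets) == 1:
        return ets[-1]
    if len(ets) == 2:
        return (3 * ets[-1] - ets[-2]) / 2
    if len(ets) == 3:
        return (23 * ets[-1] - 16 * ets[-2] + 5 * ets[-3]) / 12
    return (55 * ets[-1] - 59 * ets[-2] + 37 * ets[-3] - 9 * ets[-4]) / 24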
| 673 |
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
a_ : Optional[Any] = logging.get_logger(__name__)
a_ : Optional[Any] = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
a_ : Any = {
"vocab_file": {
"allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json"
},
"merges_file": {
"allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt"
},
}
a_ : Union[str, Any] = {"allegro/herbert-base-cased": 514}
a_ : List[Any] = {}
class UpperCamelCase ( SCREAMING_SNAKE_CASE ):
__UpperCamelCase =VOCAB_FILES_NAMES
__UpperCamelCase =PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase =PRETRAINED_INIT_CONFIGURATION
__UpperCamelCase =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase =HerbertTokenizer
def __init__( self : Tuple , snake_case__ : Optional[Any]=None , snake_case__ : int=None , snake_case__ : Optional[int]=None , snake_case__ : str="<s>" , snake_case__ : Tuple="<unk>" , snake_case__ : List[str]="<pad>" , snake_case__ : Tuple="<mask>" , snake_case__ : Dict="</s>" , **snake_case__ : List[str] , ):
"""simple docstring"""
super().__init__(
snake_case__ , snake_case__ , tokenizer_file=snake_case__ , cls_token=snake_case__ , unk_token=snake_case__ , pad_token=snake_case__ , mask_token=snake_case__ , sep_token=snake_case__ , **snake_case__ , )
def UpperCamelCase ( self : Union[str, Any] , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = [self.cls_token_id]
SCREAMING_SNAKE_CASE = [self.sep_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def UpperCamelCase ( self : Optional[Any] , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None , snake_case__ : bool = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=snake_case__ , token_ids_a=snake_case__ , already_has_special_tokens=snake_case__ )
if token_ids_a is None:
return [1] + ([0] * len(snake_case__ )) + [1]
return [1] + ([0] * len(snake_case__ )) + [1] + ([0] * len(snake_case__ )) + [1]
def UpperCamelCase ( self : Optional[Any] , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = [self.sep_token_id]
SCREAMING_SNAKE_CASE = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCamelCase ( self : Tuple , snake_case__ : str , snake_case__ : Optional[str] = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self._tokenizer.model.save(snake_case__ , name=snake_case__ )
return tuple(snake_case__ )
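# Special-token layout produced by the three methods above, assuming cls="<s>"
# and sep="</s>" as set in __init__:
#   single sequence: <s> A </s>         -> token_type_ids 0 0 0
#   sequence pair:   <s> A </s> B </s>  -> token_type_ids are 0 over "<s> A </s>"
#                                          and 1 over "B </s>"
# get_special_tokens_mask marks exactly the <s>/</s> positions with 1.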
| 673 | 1 |
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import BaseOutput, is_torch_available, is_transformers_available
@dataclass
class UpperCamelCase ( SCREAMING_SNAKE_CASE ):
__UpperCamelCase =42
__UpperCamelCase =42
if is_transformers_available() and is_torch_available():
from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
| 673 |
def __lowerCAmelCase ( _UpperCamelCase : int ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE = abs(_UpperCamelCase )
SCREAMING_SNAKE_CASE = 0
while n > 0:
res += n % 10
n //= 10
return res
def __lowerCAmelCase ( _UpperCamelCase : int ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE = abs(_UpperCamelCase )
return n if n < 10 else n % 10 + sum_of_digits(n // 10 )
def __lowerCAmelCase ( _UpperCamelCase : int ) -> int:
'''simple docstring'''
return sum(int(_UpperCamelCase ) for c in str(abs(_UpperCamelCase ) ) )
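# All three variants above compute the full digit sum (not the single-digit
# digital root); for example 262144 -> 2 + 6 + 2 + 1 + 4 + 4 = 19. The benchmark
# below times each implementation on progressively larger inputs.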
def __lowerCAmelCase ( ) -> None:
'''simple docstring'''
from collections.abc import Callable
from timeit import timeit
def benchmark_a_function(_UpperCamelCase : Callable , _UpperCamelCase : int ) -> None:
SCREAMING_SNAKE_CASE = f"""{func.__name__}({value})"""
SCREAMING_SNAKE_CASE = timeit(f"""__main__.{call}""" , setup='import __main__' )
print(f"""{call:56} = {func(_UpperCamelCase )} -- {timing:.4f} seconds""" )
for value in (26_21_44, 11_25_89_99_06_84_26_24, 1_26_76_50_60_02_28_22_94_01_49_67_03_20_53_76):
for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
benchmark_a_function(_UpperCamelCase , _UpperCamelCase )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 673 | 1 |