| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (lengths 87–55.2k) | int64 (0–349) | string (lengths 135–49.1k) | int64 (0–349) | int64 (0–1) |
import numpy as np


def exponential_linear_unit(vector: np.ndarray, alpha: float) -> np.ndarray:
    # ELU activation: identity for positive inputs, alpha * (exp(x) - 1) otherwise.
    return np.where(vector > 0, vector, alpha * (np.exp(vector) - 1))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
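    # Quick usage sketch (illustrative values, not part of the original script):
    # positive entries pass through unchanged, negative ones saturate toward -alpha.
    print(exponential_linear_unit(np.array([2.3, 0.6, -2.0, -3.8]), 0.3))
    # -> approx. [ 2.3  0.6  -0.2594  -0.2933]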
| code_codestyle: 26 |
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_torch


if is_torch_available():
    import torch

    from transformers.generation import DisjunctiveConstraint


@require_torch
class ConstraintTest(unittest.TestCase):
    def test_input_types(self):
        # For consistency across different places the DisjunctiveConstraint is called,
        # dc.token_ids is a list of integers. It is also initialized only by integers.
        cset = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(cset)
        self.assertTrue(isinstance(dc.token_ids, list))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]]))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])])

    def test_check_illegal_input(self):
        # We can't have constraints that are complete subsets of another. This leads to a perverse
        # interpretation of "constraint fulfillment": does generating [1, 2, 3] fulfill the constraint?
        # It would mean that it generated [1, 2], which fulfills it, but it's in the middle of potentially
        # fulfilling [1, 2, 3, 4]. If we believe that [1, 2, 3] does fulfill the constraint, then the algorithm
        # will necessarily never reach [1, 2, 3, 4], giving users a false sense of control (better to just not allow it).
        cset = [[1, 2], [1, 2, 3, 4]]
        with self.assertRaises(ValueError):
            DisjunctiveConstraint(cset)  # fails here

    def test_example_progression(self):
        cset = [[1, 2, 3], [1, 2, 4]]
        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(3)
        desired = stepped is True and completed is True and reset is False
        self.assertTrue(desired)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3])

    def test_example_progression_unequal_three_mid_and_reset(self):
        cset = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(4)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2, 4])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5])

        dc.reset()

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 3)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 2)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.remaining() == 0)
        self.assertTrue(dc.current_seq == [1, 2, 5])
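# Minimal end-to-end sketch (added for illustration, not part of the test file): a
# DisjunctiveConstraint feeds into constrained beam search through
# `model.generate(constraints=...)`, which requires num_beams > 1. The t5-small
# checkpoint and the prompt below are illustrative assumptions.
if __name__ == "__main__":
    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("t5-small")
    model = AutoModelForSeq2SeqLM.from_pretrained("t5-small")

    # Accept either surface form of the same German word.
    flexible_phrases = [
        tokenizer("Regen", add_special_tokens=False).input_ids,
        tokenizer("regnet", add_special_tokens=False).input_ids,
    ]
    constraint = DisjunctiveConstraint(flexible_phrases)

    inputs = tokenizer("translate English to German: It is raining.", return_tensors="pt")
    outputs = model.generate(**inputs, constraints=[constraint], num_beams=4)
    print(tokenizer.decode(outputs[0], skip_special_tokens=True))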
| style_context_codestyle: 13 | label: 0 |
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/resnet-50": "https://huggingface.co/microsoft/resnet-50/blob/main/config.json",
}


class ResNetConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "resnet"
    layer_types = ["basic", "bottleneck"]

    def __init__(
        self,
        num_channels=3,
        embedding_size=64,
        hidden_sizes=[256, 512, 1024, 2048],
        depths=[3, 4, 6, 3],
        layer_type="bottleneck",
        hidden_act="relu",
        downsample_in_first_stage=False,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.downsample_in_first_stage = downsample_in_first_stage
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )


class ResNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-3
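# Quick usage sketch (illustrative; in-repo relative imports mean this module is not
# normally run directly): build a config and inspect the backbone stage bookkeeping
# the constructor sets up. Values follow from the defaults above.
if __name__ == "__main__":
    config = ResNetConfig(layer_type="basic", out_features=["stage2", "stage4"])
    print(config.stage_names)   # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']
    print(config.out_features)  # ['stage2', 'stage4']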
| code_codestyle: 283 |
import pickle
import shutil
import tempfile
import unittest

from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")


@require_sentencepiece
@require_tokenizers
class XGLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XGLMTokenizer
    rust_tokenizer_class = XGLMTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XGLMTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(len(vocab_keys), 1008)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1008)

    def test_full_tokenizer(self):
        tokenizer = XGLMTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ],
        )

    @cached_property
    def big_tokenizer(self):
        return XGLMTokenizer.from_pretrained("facebook/xglm-564M")

    def test_picklable_without_disk(self):
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB, f.name)
            tokenizer = XGLMTokenizer(f.name, keep_accents=True)
            pickled_tokenizer = pickle.dumps(tokenizer)
        pickle.loads(pickled_tokenizer)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [2, 3_1227, 4447, 35]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            " add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth"
        )
        # fmt: off
        original_tokenizer_encodings = [2, 1018, 67, 11, 1988, 2617, 5631, 278, 11, 3407, 48, 7_1630, 2_8085, 4, 3234, 157, 13, 6, 5, 6, 4, 3526, 768, 15, 659, 57, 298, 3983, 864, 129, 21, 6, 5, 1_3675, 377, 652, 7580, 1_0341, 155, 2817, 422, 1666, 7, 1674, 53, 113, 20_2277, 1_7892, 33, 60, 87, 4, 3234, 157, 61, 2667, 5_2376, 19, 88, 23, 735]
        # fmt: on

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {
            "input_ids": [[2, 10_8825, 1163, 15, 8_8010, 473, 1_5898, 157, 1_3672, 1857, 312, 8, 23_8021, 1163, 53, 1_3672, 1857, 312, 8, 5_3283, 18_2396, 8, 1_8566, 16, 3_6733, 4101, 8, 230, 24_4017, 12_2553, 7, 15, 13_2597, 4, 293, 1_2511, 7610, 4, 3414, 13_2597, 9, 4, 3_2361, 362, 4, 734, 2_8512, 3_2569, 18, 4, 3_2361, 2_6096, 1_4982, 73, 1_8715, 2_1433, 23_5261, 15, 492, 1_2427, 16, 53, 1_8715, 2_1433, 6_5454, 15, 2_3659, 563, 16, 278, 597, 2843, 595, 7931, 18_2396, 6_4186, 22, 886, 595, 13_2981, 53, 2_5540, 3449, 4_3982, 3_9901, 5951, 878, 330, 4, 2_7694, 8_0269, 312, 53, 6517, 1_1780, 611, 2_0408, 5], [2, 6, 13_2597, 67, 4_2897, 33, 592, 8, 16_3729, 2_5540, 361, 13_6997, 10_9514, 17_3230, 7, 501, 60, 10_2913, 196, 5631, 235, 6_3243, 473, 6, 23_1757, 74, 5277, 7905, 53, 3095, 3_7317, 22, 454, 18_3874, 5], [2, 268, 3_1298, 4_6530, 6, 13_2935, 4_3831, 7, 597, 32, 24, 3688, 9865, 5]],
            "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]],
        }  # noqa: E501
        # fmt: on

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="facebook/xglm-564M",
            padding=False,
        )
| style_context_codestyle: 13 | label: 0 |
import pytest

import datasets


# Import fixture modules as plugins
pytest_plugins = ["tests.fixtures.files", "tests.fixtures.hub", "tests.fixtures.fsspec"]


def pytest_collection_modifyitems(config, items):
    # Mark tests without an explicit "integration" or "unit" marker as "unit" by default
    for item in items:
        if any(marker in item.keywords for marker in ["integration", "unit"]):
            continue
        item.add_marker(pytest.mark.unit)


def pytest_configure(config):
    config.addinivalue_line("markers", "torchaudio_latest: mark test to run with torchaudio>=0.12")


@pytest.fixture(autouse=True)
def set_test_cache_config(tmp_path_factory, monkeypatch):
    test_hf_cache_home = tmp_path_factory.getbasetemp() / "cache"
    test_hf_datasets_cache = test_hf_cache_home / "datasets"
    test_hf_metrics_cache = test_hf_cache_home / "metrics"
    test_hf_modules_cache = test_hf_cache_home / "modules"
    monkeypatch.setattr("datasets.config.HF_DATASETS_CACHE", str(test_hf_datasets_cache))
    monkeypatch.setattr("datasets.config.HF_METRICS_CACHE", str(test_hf_metrics_cache))
    monkeypatch.setattr("datasets.config.HF_MODULES_CACHE", str(test_hf_modules_cache))
    test_downloaded_datasets_path = test_hf_datasets_cache / "downloads"
    monkeypatch.setattr("datasets.config.DOWNLOADED_DATASETS_PATH", str(test_downloaded_datasets_path))
    test_extracted_datasets_path = test_hf_datasets_cache / "downloads" / "extracted"
    monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(test_extracted_datasets_path))


@pytest.fixture(autouse=True, scope="session")
def disable_tqdm_output():
    datasets.disable_progress_bar()


@pytest.fixture(autouse=True)
def set_update_download_counts_to_false(monkeypatch):
    # don't take tests into account when counting downloads
    monkeypatch.setattr("datasets.config.HF_UPDATE_DOWNLOAD_COUNTS", False)


@pytest.fixture
def set_sqlalchemy_silence_uber_warning(monkeypatch):
    # Silence repeated SQLAlchemy 2.0 deprecation warnings during tests
    monkeypatch.setattr("sqlalchemy.util.deprecations.SILENCE_UBER_WARNING", True)
| code_codestyle: 166 |
def hubble_parameter(
    hubble_constant: float,
    radiation_density: float,
    matter_density: float,
    dark_energy: float,
    redshift: float,
) -> float:
    parameters = [redshift, radiation_density, matter_density, dark_energy]
    if any(p < 0 for p in parameters):
        raise ValueError("All input parameters must be positive")

    if any(p > 1 for p in parameters[1:4]):
        raise ValueError("Relative densities cannot be greater than one")
    else:
        curvature = 1 - (matter_density + radiation_density + dark_energy)

        e_2 = (
            radiation_density * (redshift + 1) ** 4
            + matter_density * (redshift + 1) ** 3
            + curvature * (redshift + 1) ** 2
            + dark_energy
        )

        hubble = hubble_constant * e_2 ** (1 / 2)
        return hubble


if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()

    # demo LCDM approximation
    matter_density = 0.3

    print(
        hubble_parameter(
            hubble_constant=68.3,
            radiation_density=1e-4,
            matter_density=matter_density,
            dark_energy=1 - matter_density,
            redshift=0,
        )
    )
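    # What the function evaluates, written out (sanity check added for illustration,
    # not part of the original script):
    #     H(z) = H0 * sqrt(Ω_r (1+z)^4 + Ω_m (1+z)^3 + Ω_k (1+z)^2 + Ω_Λ)
    # with Ω_k = 1 - (Ω_r + Ω_m + Ω_Λ). When the densities sum to one (flat universe)
    # and z = 0, H(0) reduces to H0.
    import math

    assert math.isclose(
        hubble_parameter(
            hubble_constant=68.3,
            radiation_density=1e-4,
            matter_density=matter_density,
            dark_energy=1 - matter_density - 1e-4,
            redshift=0,
        ),
        68.3,
    )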
| style_context_codestyle: 13 | label: 0 |
from sklearn.metrics import recall_score
import datasets
A : Optional[int] = """
Recall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:
Recall = TP / (TP + FN)
Where TP is the true positives and FN is the false negatives.
"""
A : int = """
Args:
- **predictions** (`list` of `int`): The predicted labels.
- **references** (`list` of `int`): The ground truth labels.
- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.
- **pos_label** (`int`): The class label to use as the 'positive class' when calculating the recall. Defaults to `1`.
- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.
- `'binary'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.
- `'micro'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.
- `'macro'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- `'weighted'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.
- `'samples'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
- **sample_weight** (`list` of `float`): Sample weights. Defaults to `None`.
- **zero_division** (`'warn'`, `0` or `1`): Sets the value to return when there is a zero division. Defaults to `'warn'`.
- `'warn'`: If there is a zero division, the return value is `0`, but warnings are also raised.
- `0`: If there is a zero division, the return value is `0`.
- `1`: If there is a zero division, the return value is `1`.
Returns:
- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better.
Examples:
Example 1-A simple example with some errors
>>> recall_metric = datasets.load_metric('recall')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])
>>> print(results)
{'recall': 0.6666666666666666}
Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.
>>> recall_metric = datasets.load_metric('recall')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)
>>> print(results)
{'recall': 0.5}
Example 3-The same example as Example 1, but with `sample_weight` included.
>>> recall_metric = datasets.load_metric('recall')
>>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)
>>> print(results)
{'recall': 0.55}
Example 4-A multiclass example, using different averages.
>>> recall_metric = datasets.load_metric('recall')
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = recall_metric.compute(predictions=predictions, references=references, average='macro')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average='micro')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average='weighted')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{'recall': array([1., 0., 0.])}
"""
A : Tuple = """
@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Recall(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("int32")),
                    "references": datasets.Sequence(datasets.Value("int32")),
                }
                if self.config_name == "multilabel"
                else {
                    "predictions": datasets.Value("int32"),
                    "references": datasets.Value("int32"),
                }
            ),
            reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html"],
        )

    def _compute(
        self,
        predictions,
        references,
        labels=None,
        pos_label=1,
        average="binary",
        sample_weight=None,
        zero_division="warn",
    ):
        score = recall_score(
            references,
            predictions,
            labels=labels,
            pos_label=pos_label,
            average=average,
            sample_weight=sample_weight,
            zero_division=zero_division,
        )
        return {"recall": float(score) if score.size == 1 else score}
| code_codestyle: 274 |
from typing import Optional, Union

import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_v1 import MobileNetV1Config


logger = logging.get_logger(__name__)


# General docstring
_CONFIG_FOR_DOC = "MobileNetV1Config"

# Base docstring
_CHECKPOINT_FOR_DOC = "google/mobilenet_v1_1.0_224"
_EXPECTED_OUTPUT_SHAPE = [1, 1024, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "google/mobilenet_v1_1.0_224"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"


MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "google/mobilenet_v1_1.0_224",
    "google/mobilenet_v1_0.75_192",
    # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]


def _build_tf_to_pytorch_map(model, config, tf_weights=None):
    """A map of modules from TF to PyTorch."""

    tf_to_pt_map = {}

    if isinstance(model, MobileNetV1ForImageClassification):
        backbone = model.mobilenet_v1
    else:
        backbone = model

    prefix = "MobilenetV1/Conv2d_0/"
    tf_to_pt_map[prefix + "weights"] = backbone.conv_stem.convolution.weight
    tf_to_pt_map[prefix + "BatchNorm/beta"] = backbone.conv_stem.normalization.bias
    tf_to_pt_map[prefix + "BatchNorm/gamma"] = backbone.conv_stem.normalization.weight
    tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = backbone.conv_stem.normalization.running_mean
    tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = backbone.conv_stem.normalization.running_var

    for i in range(13):
        tf_index = i + 1
        pt_index = i * 2

        pointer = backbone.layer[pt_index]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_depthwise/"
        tf_to_pt_map[prefix + "depthwise_weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var

        pointer = backbone.layer[pt_index + 1]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_pointwise/"
        tf_to_pt_map[prefix + "weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var

    if isinstance(model, MobileNetV1ForImageClassification):
        prefix = "MobilenetV1/Logits/Conv2d_1c_1x1/"
        tf_to_pt_map[prefix + "weights"] = model.classifier.weight
        tf_to_pt_map[prefix + "biases"] = model.classifier.bias

    return tf_to_pt_map


def load_tf_weights_in_mobilenet_v1(model, config, tf_checkpoint_path):
    """Load TensorFlow checkpoints in a PyTorch model."""
    try:
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            "Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions."
        )
        raise

    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_checkpoint_path)
    tf_weights = {}
    for name, shape in init_vars:
        logger.info(f"Loading TF weight {name} with shape {shape}")
        array = tf.train.load_variable(tf_checkpoint_path, name)
        tf_weights[name] = array

    # Build TF to PyTorch weights loading map
    tf_to_pt_map = _build_tf_to_pytorch_map(model, config, tf_weights)

    for name, pointer in tf_to_pt_map.items():
        logger.info(f"Importing {name}")
        if name not in tf_weights:
            logger.info(f"{name} not in tf pre-trained weights, skipping")
            continue

        array = tf_weights[name]

        if "depthwise_weights" in name:
            logger.info("Transposing depthwise")
            array = np.transpose(array, (2, 3, 0, 1))
        elif "weights" in name:
            logger.info("Transposing")
            if len(pointer.shape) == 2:  # copying into linear layer
                array = array.squeeze().transpose()
            else:
                array = np.transpose(array, (3, 2, 0, 1))

        if pointer.shape != array.shape:
            raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched")

        logger.info(f"Initialize PyTorch weight {name} {array.shape}")
        pointer.data = torch.from_numpy(array)

        tf_weights.pop(name, None)
        tf_weights.pop(name + "/RMSProp", None)
        tf_weights.pop(name + "/RMSProp_1", None)
        tf_weights.pop(name + "/ExponentialMovingAverage", None)

    logger.info(f"Weights not copied to PyTorch model: {', '.join(tf_weights.keys())}")
    return model


def apply_tf_padding(features: torch.Tensor, conv_layer: nn.Conv2d) -> torch.Tensor:
    """Apply TensorFlow-style "SAME" padding to a convolution layer."""
    in_height, in_width = features.shape[-2:]
    stride_height, stride_width = conv_layer.stride
    kernel_height, kernel_width = conv_layer.kernel_size

    if in_height % stride_height == 0:
        pad_along_height = max(kernel_height - stride_height, 0)
    else:
        pad_along_height = max(kernel_height - (in_height % stride_height), 0)

    if in_width % stride_width == 0:
        pad_along_width = max(kernel_width - stride_width, 0)
    else:
        pad_along_width = max(kernel_width - (in_width % stride_width), 0)

    pad_left = pad_along_width // 2
    pad_right = pad_along_width - pad_left
    pad_top = pad_along_height // 2
    pad_bottom = pad_along_height - pad_top

    padding = (pad_left, pad_right, pad_top, pad_bottom)
    return nn.functional.pad(features, padding, "constant", 0.0)
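# Worked example of the "SAME" padding rule above (illustrative sizes, not part of
# the original module): for a 7x7 input with stride 2 and kernel 3, 7 % 2 != 0 gives
# pad_along = max(3 - (7 % 2), 0) = 2, split 1/1 per side, so the convolution sees a
# 9x9 input and produces ceil(7 / 2) = 4 per spatial dimension.
if __name__ == "__main__":
    conv = nn.Conv2d(1, 1, kernel_size=3, stride=2)
    features = torch.randn(1, 1, 7, 7)
    print(apply_tf_padding(features, conv).shape)        # torch.Size([1, 1, 9, 9])
    print(conv(apply_tf_padding(features, conv)).shape)  # torch.Size([1, 1, 4, 4])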
class MobileNetV1ConvLayer(nn.Module):
    def __init__(
        self,
        config: MobileNetV1Config,
        in_channels: int,
        out_channels: int,
        kernel_size: int,
        stride: Optional[int] = 1,
        groups: Optional[int] = 1,
        bias: bool = False,
        use_normalization: Optional[bool] = True,
        use_activation: Optional[bool or str] = True,
    ) -> None:
        super().__init__()
        self.config = config

        if in_channels % groups != 0:
            raise ValueError(f"Input channels ({in_channels}) are not divisible by {groups} groups.")
        if out_channels % groups != 0:
            raise ValueError(f"Output channels ({out_channels}) are not divisible by {groups} groups.")

        padding = 0 if config.tf_padding else int((kernel_size - 1) / 2)

        self.convolution = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            groups=groups,
            bias=bias,
            padding_mode="zeros",
        )

        if use_normalization:
            self.normalization = nn.BatchNorm2d(
                num_features=out_channels,
                eps=config.layer_norm_eps,
                momentum=0.9997,
                affine=True,
                track_running_stats=True,
            )
        else:
            self.normalization = None

        if use_activation:
            if isinstance(use_activation, str):
                self.activation = ACT2FN[use_activation]
            elif isinstance(config.hidden_act, str):
                self.activation = ACT2FN[config.hidden_act]
            else:
                self.activation = config.hidden_act
        else:
            self.activation = None

    def forward(self, features: torch.Tensor) -> torch.Tensor:
        if self.config.tf_padding:
            features = apply_tf_padding(features, self.convolution)
        features = self.convolution(features)
        if self.normalization is not None:
            features = self.normalization(features)
        if self.activation is not None:
            features = self.activation(features)
        return features


class MobileNetV1PreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = MobileNetV1Config
    load_tf_weights = load_tf_weights_in_mobilenet_v1
    base_model_prefix = "mobilenet_v1"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False

    def _init_weights(self, module: Union[nn.Linear, nn.Conv2d]) -> None:
        """Initialize the weights"""
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.BatchNorm2d):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)


MOBILENET_V1_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

MOBILENET_V1_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`MobileNetV1ImageProcessor.__call__`] for details.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""


@add_start_docstrings(
    "The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.",
    MOBILENET_V1_START_DOCSTRING,
)
class MobileNetV1Model(MobileNetV1PreTrainedModel):
    def __init__(self, config: MobileNetV1Config, add_pooling_layer: bool = True):
        super().__init__(config)
        self.config = config

        depth = 32
        out_channels = max(int(depth * config.depth_multiplier), config.min_depth)

        self.conv_stem = MobileNetV1ConvLayer(
            config,
            in_channels=config.num_channels,
            out_channels=out_channels,
            kernel_size=3,
            stride=2,
        )

        strides = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]

        self.layer = nn.ModuleList()
        for i in range(13):
            in_channels = out_channels

            if strides[i] == 2 or i == 0:
                depth *= 2
                out_channels = max(int(depth * config.depth_multiplier), config.min_depth)

            # depthwise 3x3 convolution
            self.layer.append(
                MobileNetV1ConvLayer(
                    config,
                    in_channels=in_channels,
                    out_channels=in_channels,
                    kernel_size=3,
                    stride=strides[i],
                    groups=in_channels,
                )
            )

            # pointwise 1x1 convolution
            self.layer.append(
                MobileNetV1ConvLayer(
                    config,
                    in_channels=in_channels,
                    out_channels=out_channels,
                    kernel_size=1,
                )
            )

        self.pooler = nn.AdaptiveAvgPool2d((1, 1)) if add_pooling_layer else None

        # Initialize weights and apply final processing
        self.post_init()

    def _prune_heads(self, heads_to_prune):
        raise NotImplementedError

    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, BaseModelOutputWithPoolingAndNoAttention]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        hidden_states = self.conv_stem(pixel_values)

        all_hidden_states = () if output_hidden_states else None

        for i, layer_module in enumerate(self.layer):
            hidden_states = layer_module(hidden_states)

            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

        last_hidden_state = hidden_states

        if self.pooler is not None:
            pooled_output = torch.flatten(self.pooler(last_hidden_state), start_dim=1)
        else:
            pooled_output = None

        if not return_dict:
            return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None)

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=all_hidden_states,
        )


@add_start_docstrings(
    """
    MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g.
    for ImageNet.
    """,
    MOBILENET_V1_START_DOCSTRING,
)
class MobileNetV1ForImageClassification(MobileNetV1PreTrainedModel):
    def __init__(self, config: MobileNetV1Config) -> None:
        super().__init__(config)

        self.num_labels = config.num_labels
        self.mobilenet_v1 = MobileNetV1Model(config)

        last_hidden_size = self.mobilenet_v1.layer[-1].convolution.out_channels

        # Classifier head
        self.dropout = nn.Dropout(config.classifier_dropout_prob, inplace=True)
        self.classifier = nn.Linear(last_hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity()

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        labels: Optional[torch.Tensor] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, ImageClassifierOutputWithNoAttention]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.mobilenet_v1(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(self.dropout(pooled_output))

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return ImageClassifierOutputWithNoAttention(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
        )
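# Minimal inference sketch for the classes above, using the checkpoint already named
# in the docstring constants (requires network access to download weights). The random
# tensor stands in for real preprocessed pixels; in practice feed the output of
# AutoImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224") instead.
if __name__ == "__main__":
    model = MobileNetV1ForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224")
    model.eval()

    pixel_values = torch.randn(1, 3, 224, 224)
    with torch.no_grad():
        logits = model(pixel_values).logits
    print(model.config.id2label[int(logits.argmax(-1))])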
| style_context_codestyle: 13 | label: 0 |
import collections
import inspect
import unittest
from typing import Dict, List, Tuple

from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available

from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import MaskFormerSwinBackbone
    from transformers.models.maskformer import MaskFormerSwinModel


class MaskFormerSwinModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        embed_dim=16,
        depths=[1, 2, 1],
        num_heads=[2, 2, 4],
        window_size=2,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        patch_norm=True,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        is_training=True,
        scope=None,
        use_labels=True,
        type_sequence_label_size=10,
        encoder_stride=8,
        out_features=["stage1", "stage2", "stage3"],
        out_indices=[1, 2, 3],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return MaskFormerSwinConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            depths=self.depths,
            num_heads=self.num_heads,
            window_size=self.window_size,
            mlp_ratio=self.mlp_ratio,
            qkv_bias=self.qkv_bias,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            drop_path_rate=self.drop_path_rate,
            hidden_act=self.hidden_act,
            use_absolute_embeddings=self.use_absolute_embeddings,
            path_norm=self.patch_norm,
            layer_norm_eps=self.layer_norm_eps,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
            out_features=self.out_features,
            out_indices=self.out_indices,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = MaskFormerSwinModel(config=config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values)

        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = MaskFormerSwinBackbone(config=config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [13, 16, 16, 16])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, [16, 32, 64])

        # verify ValueError
        with self.parent.assertRaises(ValueError):
            config.out_features = ["stem"]
            model = MaskFormerSwinBackbone(config=config)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MaskFormerSwinModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MaskFormerSwinModel,
            MaskFormerSwinBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {}
    fx_compatible = False
    test_torchscript = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = MaskFormerSwinModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskFormerSwinConfig, embed_dim=37)

    @require_torch_multi_gpu
    @unittest.skip(
        reason=(
            "`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with"
            " `nn.DataParallel`"
        )
    )
    def test_multi_gpu_data_parallel_forward(self):
        pass

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    @unittest.skip("Swin does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("Swin does not support feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @unittest.skip(reason="MaskFormerSwin is only used as backbone and doesn't support output_attentions")
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason="MaskFormerSwin is only used as an internal backbone")
    def test_retain_grad_hidden_states_attentions(self):
        pass

    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        model = model_class(config)
        model.to(torch_device)
        model.eval()

        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

        hidden_states = outputs.hidden_states

        expected_num_layers = getattr(
            self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1
        )
        self.assertEqual(len(hidden_states), expected_num_layers)

        # Swin has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])

        self.assertListEqual(
            list(hidden_states[0].shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )

    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

    def test_hidden_states_output_with_padding(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

    @unittest.skip(reason="MaskFormerSwin doesn't have pretrained checkpoints")
    def test_model_from_pretrained(self):
        pass

    @unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin")
    def test_save_load_fast_init_to_base(self):
        pass
    def test_model_outputs_equivalence(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def set_nan_tensor_to_zero(t):
            t[t != t] = 0
            return t

        def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
            with torch.no_grad():
                tuple_output = model(**tuple_inputs, return_dict=False, **additional_kwargs)
                dict_output = model(**dict_inputs, return_dict=True, **additional_kwargs).to_tuple()

            def recursive_check(tuple_object, dict_object):
                if isinstance(tuple_object, (List, Tuple)):
                    for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object):
                        recursive_check(tuple_iterable_value, dict_iterable_value)
                elif isinstance(tuple_object, Dict):
                    for tuple_iterable_value, dict_iterable_value in zip(
                        tuple_object.values(), dict_object.values()
                    ):
                        recursive_check(tuple_iterable_value, dict_iterable_value)
                elif tuple_object is None:
                    return
                else:
                    self.assertTrue(
                        torch.allclose(
                            set_nan_tensor_to_zero(tuple_object), set_nan_tensor_to_zero(dict_object), atol=1e-5
                        ),
                        msg=(
                            "Tuple and dict output are not equal. Difference:"
                            f" {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`:"
                            f" {torch.isnan(tuple_object).any()} and `inf`: {torch.isinf(tuple_object)}. Dict has"
                            f" `nan`: {torch.isnan(dict_object).any()} and `inf`: {torch.isinf(dict_object)}."
                        ),
                    )

            recursive_check(tuple_output, dict_output)

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})


@require_torch
class MaskFormerSwinBackboneTest(unittest.TestCase, BackboneTesterMixin):
    all_model_classes = (MaskFormerSwinBackbone,) if is_torch_available() else ()
    config_class = MaskFormerSwinConfig

    def setUp(self):
        self.model_tester = MaskFormerSwinModelTester(self)

    # Override of the common BackboneTesterMixin test to match MaskFormerSwin's output format
    def test_backbone_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        batch_size = inputs_dict["pixel_values"].shape[0]

        for backbone_class in self.all_model_classes:
            backbone = backbone_class(config)
            backbone.to(torch_device)
            backbone.eval()

            outputs = backbone(**inputs_dict)

            # Test default outputs and verify feature maps
            self.assertIsInstance(outputs.feature_maps, tuple)
            self.assertTrue(len(outputs.feature_maps) == len(backbone.channels))
            for feature_map, n_channels in zip(outputs.feature_maps, backbone.channels):
                self.assertTrue(feature_map.shape[:2], (batch_size, n_channels))
            self.assertIsNone(outputs.hidden_states)
            self.assertIsNone(outputs.attentions)

            # Test output_hidden_states=True
            outputs = backbone(**inputs_dict, output_hidden_states=True)
            self.assertIsNotNone(outputs.hidden_states)
            self.assertTrue(len(outputs.hidden_states), len(backbone.stage_names))
            # We skip the stem layer
            for hidden_states, n_channels in zip(outputs.hidden_states[1:], backbone.channels):
                for hidden_state in hidden_states:
                    # Hidden states are in the format (batch_size, (height * width), n_channels)
                    h_batch_size, _, h_n_channels = hidden_state.shape
                    self.assertTrue((h_batch_size, h_n_channels), (batch_size, n_channels))

            # Test output_attentions=True
            if self.has_attentions:
                outputs = backbone(**inputs_dict, output_attentions=True)
                self.assertIsNotNone(outputs.attentions)
| code_codestyle: 311 |
def snake_to_camel_case(input_str: str, use_pascal: bool = False) -> str:
    if not isinstance(input_str, str):
        msg = f"Expected string as input, found {type(input_str)}"
        raise ValueError(msg)
    if not isinstance(use_pascal, bool):
        msg = f"Expected boolean as use_pascal parameter, found {type(use_pascal)}"
        raise ValueError(msg)

    words = input_str.split("_")

    # PascalCase capitalizes every word; camelCase leaves the first word as-is.
    start_index = 0 if use_pascal else 1

    words_to_capitalize = words[start_index:]

    capitalized_words = [word[0].upper() + word[1:] for word in words_to_capitalize]

    initial_word = "" if use_pascal else words[0]

    return "".join([initial_word, *capitalized_words])


if __name__ == "__main__":
    from doctest import testmod

    testmod()
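    # Usage sketch (illustrative): assumes well-formed snake_case with non-empty segments.
    print(snake_to_camel_case("some_random_string"))                   # someRandomString
    print(snake_to_camel_case("some_random_string", use_pascal=True))  # SomeRandomString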
| style_context_codestyle: 13 | label: 0 |
import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
@pytest.mark.parametrize("dataset_size", [None, 400 * 2**20, 600 * 2**20])
@pytest.mark.parametrize("input_in_memory_max_size", ["default", 0, 100 * 2**20, 900 * 2**20])
def test_is_small_dataset(dataset_size, input_in_memory_max_size, monkeypatch):
    if input_in_memory_max_size != "default":
        monkeypatch.setattr(datasets.config, "IN_MEMORY_MAX_SIZE", input_in_memory_max_size)
    in_memory_max_size = datasets.config.IN_MEMORY_MAX_SIZE
    if input_in_memory_max_size == "default":
        assert in_memory_max_size == 0
    else:
        assert in_memory_max_size == input_in_memory_max_size
    if dataset_size and in_memory_max_size:
        expected = dataset_size < in_memory_max_size
    else:
        expected = False
    result = is_small_dataset(dataset_size)
    assert result == expected
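# For reference, a sketch of the function under test; the real implementation lives in
# datasets.utils.info_utils, so treat this as an approximation of the behaviour the test asserts:
#
#     def is_small_dataset(dataset_size):
#         if dataset_size and datasets.config.IN_MEMORY_MAX_SIZE:
#             return dataset_size < datasets.config.IN_MEMORY_MAX_SIZE
#         return False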
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def unwrap_schedule(scheduler, num_steps=10):
    lrs = []
    for _ in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
    return lrs


def unwrap_and_save_reload_schedule(scheduler, num_steps=10):
    lrs = []
    for step in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
        if step == num_steps // 2:
            with tempfile.TemporaryDirectory() as tmpdirname:
                file_name = os.path.join(tmpdirname, "schedule.bin")
                torch.save(scheduler.state_dict(), file_name)
                state_dict = torch.load(file_name)
                scheduler.load_state_dict(state_dict)
    return lrs


@require_torch
class OptimizationTest(unittest.TestCase):
    def assertListAlmostEqual(self, list1, list2, tol):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol)

    def test_adam_w(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = AdamW(params=[w], lr=2e-1, weight_decay=0.0)
        for _ in range(100):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)

    def test_adafactor(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = Adafactor(
            params=[w],
            lr=1e-2,
            eps=(1e-30, 1e-3),
            clip_threshold=1.0,
            decay_rate=-0.8,
            beta1=None,
            weight_decay=0.0,
            relative_step=False,
            scale_parameter=False,
            warmup_init=False,
        )
        for _ in range(1000):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)


@require_torch
class ScheduleInitTest(unittest.TestCase):
    m = nn.Linear(50, 50) if is_torch_available() else None
    optimizer = AdamW(m.parameters(), lr=10.0) if is_torch_available() else None
    num_steps = 10

    def assertListAlmostEqual(self, list1, list2, tol, msg=None):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol, msg=msg)

    def test_schedulers(self):
        common_kwargs = {"num_warmup_steps": 2, "num_training_steps": 10}
        # schedulers dict format
        # function: (sched_args_dict, expected_learning_rates)
        scheds = {
            get_constant_schedule: ({}, [10.0] * self.num_steps),
            get_constant_schedule_with_warmup: (
                {"num_warmup_steps": 4},
                [0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
            ),
            get_linear_schedule_with_warmup: (
                {**common_kwargs},
                [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
            ),
            get_cosine_schedule_with_warmup: (
                {**common_kwargs},
                [0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
            ),
            get_cosine_with_hard_restarts_schedule_with_warmup: (
                {**common_kwargs, "num_cycles": 2},
                [0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
            ),
            get_polynomial_decay_schedule_with_warmup: (
                {**common_kwargs, "power": 2.0, "lr_end": 1e-7},
                [0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
            ),
            get_inverse_sqrt_schedule: (
                {"num_warmup_steps": 2},
                [0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
            ),
        }

        for scheduler_func, data in scheds.items():
            kwargs, expected_learning_rates = data

            scheduler = scheduler_func(self.optimizer, **kwargs)
            self.assertEqual(len([scheduler.get_lr()[0]]), 1)
            lrs_1 = unwrap_schedule(scheduler, self.num_steps)
            self.assertListAlmostEqual(
                lrs_1, expected_learning_rates, tol=1e-2, msg=f"failed for {scheduler_func} in normal scheduler"
            )

            scheduler = scheduler_func(self.optimizer, **kwargs)
            if scheduler_func.__name__ != "get_constant_schedule":
                LambdaScheduleWrapper.wrap_scheduler(scheduler)  # wrap to test picklability of the schedule
            lrs_2 = unwrap_and_save_reload_schedule(scheduler, self.num_steps)
            self.assertListEqual(lrs_1, lrs_2, msg=f"failed for {scheduler_func} in save and reload")


class LambdaScheduleWrapper:
    """Wraps a schedule function so the scheduler's lr_lambdas become picklable."""

    def __init__(self, fn):
        self.fn = fn

    def __call__(self, *args, **kwargs):
        return self.fn(*args, **kwargs)

    @classmethod
    def wrap_scheduler(cls, scheduler):
        scheduler.lr_lambdas = list(map(cls, scheduler.lr_lambdas))
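# A typical training-loop usage sketch for the schedules tested above; the step counts and the
# learning rate are illustrative:
#
#     model = nn.Linear(50, 50)
#     optimizer = AdamW(model.parameters(), lr=5e-5)
#     scheduler = get_linear_schedule_with_warmup(
#         optimizer, num_warmup_steps=100, num_training_steps=1000
#     )
#     for _ in range(1000):
#         ...  # forward pass, loss.backward()
#         optimizer.step()
#         scheduler.step()  # advance the learning rate once per optimizer step
#         optimizer.zero_grad()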
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"configuration_vit": ["VIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTConfig", "ViTOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_vit"] = ["ViTFeatureExtractor"]
    _import_structure["image_processing_vit"] = ["ViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit"] = [
        "VIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTForImageClassification",
        "ViTForMaskedImageModeling",
        "ViTModel",
        "ViTPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vit"] = [
        "TFViTForImageClassification",
        "TFViTModel",
        "TFViTPreTrainedModel",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_vit"] = [
        "FlaxViTForImageClassification",
        "FlaxViTModel",
        "FlaxViTPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
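# The deferred-import behaviour provided by _LazyModule can be approximated with PEP 562
# module-level __getattr__; a hypothetical standalone sketch, not the transformers implementation:
#
#     import importlib
#
#     def __getattr__(name):
#         for module_name, exports in _import_structure.items():
#             if name in exports:
#                 module = importlib.import_module(f".{module_name}", __name__)
#                 return getattr(module, name)
#         raise AttributeError(f"module {__name__!r} has no attribute {name!r}")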
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class AutomaticSpeechRecognition(TaskTemplate):
    task: str = field(default="automatic-speech-recognition", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"transcription": Value("string")})
    audio_column: str = "audio"
    transcription_column: str = "transcription"

    def align_with_features(self, features):
        if self.audio_column not in features:
            raise ValueError(f"Column {self.audio_column} is not present in features.")
        if not isinstance(features[self.audio_column], Audio):
            raise ValueError(f"Column {self.audio_column} is not an Audio type.")
        task_template = copy.deepcopy(self)
        input_schema = self.input_schema.copy()
        input_schema["audio"] = features[self.audio_column]
        task_template.__dict__["input_schema"] = input_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.audio_column: "audio", self.transcription_column: "transcription"}
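# A usage sketch for the template above; the Features layout is an assumption for illustration:
#
#     from datasets import Audio, Features, Value
#
#     features = Features({"audio": Audio(sampling_rate=16_000), "transcription": Value("string")})
#     template = AutomaticSpeechRecognition().align_with_features(features)
#     # the column's Audio type (with its sampling rate) is now copied into template.input_schema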
import colorsys
from PIL import Image # type: ignore
def get_distance(x: float, y: float, max_step: int) -> float:
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new
        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)


def get_black_and_white_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)


def get_color_coded_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))


def get_image(
    image_width: int = 800,
    image_height: int = 600,
    figure_center_x: float = -0.6,
    figure_center_y: float = 0,
    figure_width: float = 3.2,
    max_step: int = 50,
    use_distance_color_coding: bool = True,
) -> Image.Image:
    img = Image.new("RGB", (image_width, image_height))
    pixels = img.load()

    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height

            distance = get_distance(figure_x, figure_y, max_step)

            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)

    return img


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # colored version, full figure
    img = get_image()

    # uncomment for colored version, different section, zoomed in
    # img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
    # figure_width = 0.8)

    # uncomment for black and white version, full figure
    # img = get_image(use_distance_color_coding = False)

    # uncomment to save the image
    # img.save("mandelbrot.png")

    img.show()
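# Cross-check of the escape-time iteration above using Python's complex type; get_distance starts
# at z0 = c and iterates z -> z**2 + c, and the test a*a + b*b > 4 is the same as |z| > 2:
#
#     def get_distance_complex(x: float, y: float, max_step: int) -> float:
#         c = complex(x, y)
#         z = c
#         for step in range(max_step):  # noqa: B007
#             z = z * z + c
#             if abs(z) > 2:
#                 break
#         return step / (max_step - 1)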
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class LogitsProcessorTest(unittest.TestCase):
    def _get_uniform_logits(self, batch_size: int, length: int):
        scores = jnp.ones((batch_size, length)) / length
        return scores

    def test_temperature_dist_warper(self):
        input_ids = None
        length = 20

        scores = self._get_uniform_logits(batch_size=2, length=length)

        # tweak scores to not be uniform anymore
        scores = scores.at[1, 5].set((1 / length) + 0.1)  # peak, 1st batch
        scores = scores.at[1, 10].set((1 / length) - 0.4)  # valley, 1st batch

        # compute softmax
        probs = jax.nn.softmax(scores, axis=-1)

        temp_dist_warper_sharper = FlaxTemperatureLogitsWarper(temperature=0.5)
        temp_dist_warper_smoother = FlaxTemperatureLogitsWarper(temperature=1.3)

        warped_prob_sharp = jax.nn.softmax(temp_dist_warper_sharper(input_ids, scores.copy(), cur_len=None), axis=-1)
        warped_prob_smooth = jax.nn.softmax(temp_dist_warper_smoother(input_ids, scores.copy(), cur_len=None), axis=-1)

        # uniform distribution stays uniform
        self.assertTrue(jnp.allclose(probs[0, :], warped_prob_sharp[0, :], atol=1e-3))
        self.assertTrue(jnp.allclose(probs[0, :], warped_prob_smooth[0, :], atol=1e-3))

        # sharp peaks get higher, valleys get lower
        self.assertLess(probs[1, :].max(), warped_prob_sharp[1, :].max())
        self.assertGreater(probs[1, :].min(), warped_prob_sharp[1, :].min())

        # smooth peaks get lower, valleys get higher
        self.assertGreater(probs[1, :].max(), warped_prob_smooth[1, :].max())
        self.assertLess(probs[1, :].min(), warped_prob_smooth[1, :].min())

    def test_top_k_dist_warper(self):
        input_ids = None
        vocab_size = 10
        batch_size = 2

        # create ramp distribution
        ramp_logits = np.broadcast_to(np.arange(vocab_size)[None, :], (batch_size, vocab_size)).copy()
        ramp_logits[1:, : vocab_size // 2] = ramp_logits[1:, : vocab_size // 2] + vocab_size

        top_k_warp = FlaxTopKLogitsWarper(3)

        scores = top_k_warp(input_ids, ramp_logits, cur_len=None)

        # check that correct tokens are filtered
        self.assertListEqual(jnp.isinf(scores[0]).tolist(), 7 * [True] + 3 * [False])
        self.assertListEqual(jnp.isinf(scores[1]).tolist(), 2 * [True] + 3 * [False] + 5 * [True])

        # check special case
        length = 5
        top_k_warp_safety_check = FlaxTopKLogitsWarper(top_k=1, filter_value=0.0, min_tokens_to_keep=3)

        ramp_logits = np.broadcast_to(np.arange(length)[None, :], (batch_size, length)).copy()
        scores = top_k_warp_safety_check(input_ids, ramp_logits, cur_len=None)

        # min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
        self.assertListEqual((scores == 0.0).sum(axis=-1).tolist(), [2, 2])

    def test_top_p_dist_warper(self):
        input_ids = None
        vocab_size = 10
        batch_size = 2

        # create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
        dist = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]]))

        top_p_warp = FlaxTopPLogitsWarper(0.8)
        filtered_dist = np.exp(top_p_warp(input_ids, dist, cur_len=None))

        # dist should be filtered to keep min num values so that sum is >= top_p
        # exp (-inf) => 0
        EXPECTED_FILTERED_DIST = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]])
        self.assertTrue(np.allclose(filtered_dist, EXPECTED_FILTERED_DIST, atol=1e-3))

        # check edge cases with negative and extreme logits
        ramp_logits = np.broadcast_to(np.arange(vocab_size)[None, :], (batch_size, vocab_size)).copy() - (
            vocab_size // 2
        )

        # make ramp_logits more extreme
        ramp_logits[1] = ramp_logits[1] * 100.0

        # make sure at least 2 tokens are kept
        top_p_warp = FlaxTopPLogitsWarper(0.9, min_tokens_to_keep=2, filter_value=0.0)
        filtered_dist = top_p_warp(input_ids, ramp_logits, cur_len=None)

        # first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
        self.assertListEqual((filtered_dist != 0.0).sum(axis=-1).tolist(), [3, 2])

    def test_min_length_dist_processor(self):
        vocab_size = 20
        batch_size = 4
        eos_token_id = 0

        min_dist_processor = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)

        # check that min length is applied at length 5
        input_ids = ids_tensor((batch_size, 20), vocab_size=20)
        cur_len = 5
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_before_min_length = min_dist_processor(input_ids, scores, cur_len=cur_len)
        self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist(), 4 * [-float("inf")])

        # check that min length is not applied anymore at length 15
        scores = self._get_uniform_logits(batch_size, vocab_size)
        cur_len = 15
        scores_before_min_length = min_dist_processor(input_ids, scores, cur_len=cur_len)
        self.assertFalse(jnp.isinf(scores_before_min_length).any())

    def test_forced_bos_token_logits_processor(self):
        vocab_size = 20
        batch_size = 4
        bos_token_id = 0

        logits_processor = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)

        # check that all scores are -inf except the bos_token_id score
        input_ids = ids_tensor((batch_size, 1), vocab_size=20)
        cur_len = 1
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :]).all())
        self.assertListEqual(scores[:, bos_token_id].tolist(), 4 * [0])  # score for bos_token_id should be zero

        # check that bos_token_id is not forced if current length is greater than 1
        cur_len = 3
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertFalse(jnp.isinf(scores).any())

    def test_forced_eos_token_logits_processor(self):
        vocab_size = 20
        batch_size = 4
        eos_token_id = 0
        max_length = 5

        logits_processor = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)

        # check that all scores are -inf except the eos_token_id when max_length is reached
        input_ids = ids_tensor((batch_size, 4), vocab_size=20)
        cur_len = 4
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :]).all())
        self.assertListEqual(scores[:, eos_token_id].tolist(), 4 * [0])  # score for eos_token_id should be zero

        # check that eos_token_id is not forced if max_length is not reached
        cur_len = 3
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertFalse(jnp.isinf(scores).any())

    def test_processor_list(self):
        batch_size = 4
        sequence_length = 10
        vocab_size = 15
        eos_token_id = 2
        bos_token_id = 1
        max_length = 15

        # dummy input_ids and scores
        input_ids = ids_tensor((batch_size, sequence_length), vocab_size)
        input_ids_comp = input_ids.copy()

        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_comp = scores.copy()

        # instantiate all dist processors
        temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5)
        top_k_warp = FlaxTopKLogitsWarper(3)
        top_p_warp = FlaxTopPLogitsWarper(0.8)

        # instantiate all logits processors
        min_dist_proc = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)
        bos_dist_proc = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)
        eos_dist_proc = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)

        cur_len = 10

        # no processor list
        scores = temp_dist_warp(input_ids, scores, cur_len=cur_len)
        scores = top_k_warp(input_ids, scores, cur_len=cur_len)
        scores = top_p_warp(input_ids, scores, cur_len=cur_len)
        scores = min_dist_proc(input_ids, scores, cur_len=cur_len)
        scores = bos_dist_proc(input_ids, scores, cur_len=cur_len)
        scores = eos_dist_proc(input_ids, scores, cur_len=cur_len)

        # with processor list
        processor = FlaxLogitsProcessorList(
            [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc]
        )
        scores_comp = processor(input_ids, scores_comp, cur_len=cur_len)

        # scores should be equal
        self.assertTrue(jnp.allclose(scores, scores_comp, atol=1e-3))

        # input_ids should never be changed
        self.assertListEqual(input_ids.tolist(), input_ids_comp.tolist())

    def test_processor_list_jitted(self):
        batch_size = 4
        sequence_length = 10
        vocab_size = 15
        eos_token_id = 2
        bos_token_id = 1
        max_length = 15

        # dummy input_ids and scores
        input_ids = ids_tensor((batch_size, sequence_length), vocab_size)
        input_ids_comp = input_ids.copy()

        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_comp = scores.copy()

        # instantiate all dist processors
        temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5)
        top_k_warp = FlaxTopKLogitsWarper(3)
        top_p_warp = FlaxTopPLogitsWarper(0.8)

        # instantiate all logits processors
        min_dist_proc = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)
        bos_dist_proc = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)
        eos_dist_proc = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)

        cur_len = 10

        # no processor list
        def run_no_processor_list(input_ids, scores, cur_len):
            scores = temp_dist_warp(input_ids, scores, cur_len=cur_len)
            scores = top_k_warp(input_ids, scores, cur_len=cur_len)
            scores = top_p_warp(input_ids, scores, cur_len=cur_len)
            scores = min_dist_proc(input_ids, scores, cur_len=cur_len)
            scores = bos_dist_proc(input_ids, scores, cur_len=cur_len)
            scores = eos_dist_proc(input_ids, scores, cur_len=cur_len)
            return scores

        # with processor list
        def run_processor_list(input_ids, scores, cur_len):
            processor = FlaxLogitsProcessorList(
                [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc]
            )
            scores = processor(input_ids, scores, cur_len=cur_len)
            return scores

        jitted_run_no_processor_list = jax.jit(run_no_processor_list)
        jitted_run_processor_list = jax.jit(run_processor_list)

        scores = jitted_run_no_processor_list(input_ids, scores, cur_len)
        scores_comp = jitted_run_processor_list(input_ids, scores_comp, cur_len)

        # scores should be equal
        self.assertTrue(jnp.allclose(scores, scores_comp, atol=1e-3))

        # input_ids should never be changed
        self.assertListEqual(input_ids.tolist(), input_ids_comp.tolist())
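# A small numpy reference for the top-k semantics asserted in test_top_k_dist_warper, independent
# of the Flax warper (ties at the k-th value may keep extra entries, as in the safety-check case):
def _reference_top_k(logits, k, filter_value=-float("inf")):
    import numpy as np

    scores = np.array(logits, dtype=np.float64)
    kth_largest = np.sort(scores, axis=-1)[..., -k]  # k-th largest value per row
    scores[scores < kth_largest[..., None]] = filter_value  # mask everything below it
    return scores


# e.g. _reference_top_k([[0.0, 1.0, 2.0, 3.0]], k=2) keeps only the scores for tokens 2 and 3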
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class StreamerTester(unittest.TestCase):
    def test_text_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]

        self.assertEqual(streamer_text, greedy_text)

    def test_iterator_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])

        streamer = TextIteratorStreamer(tokenizer)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()
        streamer_text = ""
        for new_text in streamer:
            streamer_text += new_text

        self.assertEqual(streamer_text, greedy_text)

    def test_text_streamer_skip_prompt(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        new_greedy_ids = greedy_ids[:, input_ids.shape[1] :]
        new_greedy_text = tokenizer.decode(new_greedy_ids[0])

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_prompt=True)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]

        self.assertEqual(streamer_text, new_greedy_text)

    def test_text_streamer_decode_kwargs(self):
        tokenizer = AutoTokenizer.from_pretrained("distilgpt2")
        model = AutoModelForCausalLM.from_pretrained("distilgpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = torch.ones((1, 5), device=torch_device).long() * model.config.bos_token_id
        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_special_tokens=True)
            model.generate(input_ids, max_new_tokens=1, do_sample=False, streamer=streamer)

        # The prompt contains a special token, so the streamer should not print it. As such, the output text, when
        # re-tokenized, must only contain one token
        streamer_text = cs.out[:-1]  # Remove the final "\n"
        streamer_text_tokenized = tokenizer(streamer_text, return_tensors="pt")
        self.assertEqual(streamer_text_tokenized.input_ids.shape, (1, 1))

    def test_iterator_streamer_timeout(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        streamer = TextIteratorStreamer(tokenizer, timeout=0.001)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()

        # The streamer will timeout after 0.001 seconds, so an exception will be raised
        with self.assertRaises(Empty):
            streamer_text = ""
            for new_text in streamer:
                streamer_text += new_text
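# Application-side usage sketch for TextIteratorStreamer, mirroring the threading pattern used in
# the tests above (model, tokenizer and input_ids are placeholders):
#
#     streamer = TextIteratorStreamer(tokenizer)
#     generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 20, "streamer": streamer}
#     Thread(target=model.generate, kwargs=generation_kwargs).start()
#     for new_text in streamer:  # yields decoded text chunks as soon as they are ready
#         print(new_text, end="")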
import math
import sys
def minimum_squares_to_represent_a_number(number: int) -> int:
    """Count the minimum number of perfect squares that sum to `number`."""
    if number != int(number):
        raise ValueError("the value of input must be a natural number")
    if number < 0:
        raise ValueError("the value of input must not be a negative number")
    if number == 0:
        return 1
    answers = [-1] * (number + 1)
    answers[0] = 0
    for i in range(1, number + 1):
        answer = sys.maxsize
        root = int(math.sqrt(i))
        for j in range(1, root + 1):
            current_answer = 1 + answers[i - (j**2)]
            answer = min(answer, current_answer)
        answers[i] = answer
    return answers[number]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
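# Worked example: the DP above gives minimum_squares_to_represent_a_number(12) == 3, since
# 12 = 4 + 4 + 4; by Lagrange's four-square theorem the result is at most 4 for any natural number.
#
#     assert minimum_squares_to_represent_a_number(12) == 3
#     assert minimum_squares_to_represent_a_number(25) == 1  # 25 = 5**2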
import copy
import os
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
from datasets.arrow_writer import ArrowWriter, OptimizedTypedSequence, ParquetWriter, TypedSequence
from datasets.features import Array2D, ClassLabel, Features, Image, Value
from datasets.features.features import Array2DExtensionType, cast_to_python_objects
from datasets.keyhash import DuplicatedKeysError, InvalidKeyError
from .utils import require_pil
class TypedSequenceTest(TestCase):
    def test_no_type(self):
        arr = pa.array(TypedSequence([1, 2, 3]))
        self.assertEqual(arr.type, pa.int64())

    def test_array_type_forbidden(self):
        with self.assertRaises(ValueError):
            _ = pa.array(TypedSequence([1, 2, 3]), type=pa.int64())

    def test_try_type_and_type_forbidden(self):
        with self.assertRaises(ValueError):
            _ = pa.array(TypedSequence([1, 2, 3], try_type=Value("bool"), type=Value("int64")))

    def test_compatible_type(self):
        arr = pa.array(TypedSequence([1, 2, 3], type=Value("int32")))
        self.assertEqual(arr.type, pa.int32())

    def test_incompatible_type(self):
        with self.assertRaises((TypeError, pa.lib.ArrowInvalid)):
            _ = pa.array(TypedSequence(["foo", "bar"], type=Value("int64")))

    def test_try_compatible_type(self):
        arr = pa.array(TypedSequence([1, 2, 3], try_type=Value("int32")))
        self.assertEqual(arr.type, pa.int32())

    def test_try_incompatible_type(self):
        arr = pa.array(TypedSequence(["foo", "bar"], try_type=Value("int64")))
        self.assertEqual(arr.type, pa.string())

    def test_compatible_extension_type(self):
        arr = pa.array(TypedSequence([[[1, 2, 3]]], type=Array2D((1, 3), "int64")))
        self.assertEqual(arr.type, Array2DExtensionType((1, 3), "int64"))

    def test_incompatible_extension_type(self):
        with self.assertRaises((TypeError, pa.lib.ArrowInvalid)):
            _ = pa.array(TypedSequence(["foo", "bar"], type=Array2D((1, 3), "int64")))

    def test_try_compatible_extension_type(self):
        arr = pa.array(TypedSequence([[[1, 2, 3]]], try_type=Array2D((1, 3), "int64")))
        self.assertEqual(arr.type, Array2DExtensionType((1, 3), "int64"))

    def test_try_incompatible_extension_type(self):
        arr = pa.array(TypedSequence(["foo", "bar"], try_type=Array2D((1, 3), "int64")))
        self.assertEqual(arr.type, pa.string())

    @require_pil
    def test_exhaustive_cast(self):
        import PIL.Image

        pil_image = PIL.Image.fromarray(np.arange(10, dtype=np.uint8).reshape(2, 5))
        with patch(
            "datasets.arrow_writer.cast_to_python_objects", side_effect=cast_to_python_objects
        ) as mock_cast_to_python_objects:
            _ = pa.array(TypedSequence([{"path": None, "bytes": b"image_bytes"}, pil_image], type=Image()))
            args, kwargs = mock_cast_to_python_objects.call_args_list[-1]
            self.assertIn("optimize_list_casting", kwargs)
            self.assertFalse(kwargs["optimize_list_casting"])


def _check_output(output, expected_num_chunks: int):
    stream = pa.BufferReader(output) if isinstance(output, pa.Buffer) else pa.memory_map(output)
    f = pa.ipc.open_stream(stream)
    pa_table: pa.Table = f.read_all()
    assert len(pa_table.to_batches()) == expected_num_chunks
    assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
    del pa_table


@pytest.mark.parametrize("writer_batch_size", [None, 1, 10])
@pytest.mark.parametrize(
    "fields", [None, {"col_1": pa.string(), "col_2": pa.int64()}, {"col_1": pa.string(), "col_2": pa.int32()}]
)
def test_write(fields, writer_batch_size):
    output = pa.BufferOutputStream()
    schema = pa.schema(fields) if fields else None
    with ArrowWriter(stream=output, schema=schema, writer_batch_size=writer_batch_size) as writer:
        writer.write({"col_1": "foo", "col_2": 1})
        writer.write({"col_1": "bar", "col_2": 2})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    if not fields:
        fields = {"col_1": pa.string(), "col_2": pa.int64()}
    assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
    _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1)


def test_write_with_features():
    output = pa.BufferOutputStream()
    features = Features({"labels": ClassLabel(names=["neg", "pos"])})
    with ArrowWriter(stream=output, features=features) as writer:
        writer.write({"labels": 0})
        writer.write({"labels": 1})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    assert writer._schema == features.arrow_schema
    assert writer._schema.metadata == features.arrow_schema.metadata
    stream = pa.BufferReader(output.getvalue())
    f = pa.ipc.open_stream(stream)
    pa_table: pa.Table = f.read_all()
    schema = pa_table.schema
    assert pa_table.num_rows == 2
    assert schema == features.arrow_schema
    assert schema.metadata == features.arrow_schema.metadata
    assert features == Features.from_arrow_schema(schema)


@pytest.mark.parametrize("writer_batch_size", [None, 1, 10])
def test_key_datatype(writer_batch_size):
    output = pa.BufferOutputStream()
    with ArrowWriter(
        stream=output, writer_batch_size=writer_batch_size, hash_salt="split_name", check_duplicates=True
    ) as writer:
        with pytest.raises(InvalidKeyError):
            writer.write({"col_1": "foo", "col_2": 1}, key=[1, 2])
        num_examples, num_bytes = writer.finalize()


@pytest.mark.parametrize("writer_batch_size", [None, 2, 10])
def test_duplicate_keys(writer_batch_size):
    output = pa.BufferOutputStream()
    with ArrowWriter(
        stream=output, writer_batch_size=writer_batch_size, hash_salt="split_name", check_duplicates=True
    ) as writer:
        with pytest.raises(DuplicatedKeysError):
            writer.write({"col_1": "foo", "col_2": 1}, key=10)
            writer.write({"col_1": "bar", "col_2": 2}, key=10)
        num_examples, num_bytes = writer.finalize()


@pytest.mark.parametrize("writer_batch_size", [None, 2, 10])
def test_write_with_keys(writer_batch_size):
    output = pa.BufferOutputStream()
    with ArrowWriter(
        stream=output, writer_batch_size=writer_batch_size, hash_salt="split_name", check_duplicates=True
    ) as writer:
        writer.write({"col_1": "foo", "col_2": 1}, key=1)
        writer.write({"col_1": "bar", "col_2": 2}, key=2)
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1)


@pytest.mark.parametrize("writer_batch_size", [None, 1, 10])
@pytest.mark.parametrize(
    "fields", [None, {"col_1": pa.string(), "col_2": pa.int64()}, {"col_1": pa.string(), "col_2": pa.int32()}]
)
def test_write_batch(fields, writer_batch_size):
    output = pa.BufferOutputStream()
    schema = pa.schema(fields) if fields else None
    with ArrowWriter(stream=output, schema=schema, writer_batch_size=writer_batch_size) as writer:
        writer.write_batch({"col_1": ["foo", "bar"], "col_2": [1, 2]})
        writer.write_batch({"col_1": [], "col_2": []})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    if not fields:
        fields = {"col_1": pa.string(), "col_2": pa.int64()}
    assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
    _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1)


@pytest.mark.parametrize("writer_batch_size", [None, 1, 10])
@pytest.mark.parametrize(
    "fields", [None, {"col_1": pa.string(), "col_2": pa.int64()}, {"col_1": pa.string(), "col_2": pa.int32()}]
)
def test_write_table(fields, writer_batch_size):
    output = pa.BufferOutputStream()
    schema = pa.schema(fields) if fields else None
    with ArrowWriter(stream=output, schema=schema, writer_batch_size=writer_batch_size) as writer:
        writer.write_table(pa.Table.from_pydict({"col_1": ["foo", "bar"], "col_2": [1, 2]}))
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    if not fields:
        fields = {"col_1": pa.string(), "col_2": pa.int64()}
    assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
    _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1)


@pytest.mark.parametrize("writer_batch_size", [None, 1, 10])
@pytest.mark.parametrize(
    "fields", [None, {"col_1": pa.string(), "col_2": pa.int64()}, {"col_1": pa.string(), "col_2": pa.int32()}]
)
def test_write_row(fields, writer_batch_size):
    output = pa.BufferOutputStream()
    schema = pa.schema(fields) if fields else None
    with ArrowWriter(stream=output, schema=schema, writer_batch_size=writer_batch_size) as writer:
        writer.write_row(pa.Table.from_pydict({"col_1": ["foo"], "col_2": [1]}))
        writer.write_row(pa.Table.from_pydict({"col_1": ["bar"], "col_2": [2]}))
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    if not fields:
        fields = {"col_1": pa.string(), "col_2": pa.int64()}
    assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
    _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1)


def test_write_file():
    with tempfile.TemporaryDirectory() as tmp_dir:
        fields = {"col_1": pa.string(), "col_2": pa.int64()}
        output = os.path.join(tmp_dir, "test.arrow")
        with ArrowWriter(path=output, schema=pa.schema(fields)) as writer:
            writer.write_batch({"col_1": ["foo", "bar"], "col_2": [1, 2]})
            num_examples, num_bytes = writer.finalize()
        assert num_examples == 2
        assert num_bytes > 0
        assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
        _check_output(output, 1)


def get_base_dtype(arr_type):
    if pa.types.is_list(arr_type):
        return get_base_dtype(arr_type.value_type)
    else:
        return arr_type


def change_first_primitive_element_in_list(lst, value):
    if isinstance(lst[0], list):
        change_first_primitive_element_in_list(lst[0], value)
    else:
        lst[0] = value


@pytest.mark.parametrize("optimized_int_type, expected_dtype", [(None, pa.int64()), (Value("int32"), pa.int32())])
@pytest.mark.parametrize("sequence", [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]])
def test_optimized_int_type_for_typed_sequence(sequence, optimized_int_type, expected_dtype):
    arr = pa.array(TypedSequence(sequence, optimized_int_type=optimized_int_type))
    assert get_base_dtype(arr.type) == expected_dtype


@pytest.mark.parametrize(
    "col, expected_dtype",
    [
        ("attention_mask", pa.int8()),
        ("special_tokens_mask", pa.int8()),
        ("token_type_ids", pa.int8()),
        ("input_ids", pa.int32()),
        ("other", pa.int64()),
    ],
)
@pytest.mark.parametrize("sequence", [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]])
def test_optimized_typed_sequence(sequence, col, expected_dtype):
    arr = pa.array(OptimizedTypedSequence(sequence, col=col))
    assert get_base_dtype(arr.type) == expected_dtype

    # not in range
    if col != "other":
        # avoids errors due to in-place modifications
        sequence = copy.deepcopy(sequence)
        value = np.iinfo(expected_dtype.to_pandas_dtype()).max + 1
        change_first_primitive_element_in_list(sequence, value)
        arr = pa.array(OptimizedTypedSequence(sequence, col=col))
        assert get_base_dtype(arr.type) == pa.int64()


@pytest.mark.parametrize("raise_exception", [False, True])
def test_arrow_writer_closes_stream(raise_exception, tmp_path):
    path = str(tmp_path / "dataset-train.arrow")
    try:
        with ArrowWriter(path=path) as writer:
            if raise_exception:
                raise pa.lib.ArrowInvalid()
            else:
                writer.stream.close()
    except pa.lib.ArrowInvalid:
        pass
    finally:
        assert writer.stream.closed


def test_arrow_writer_with_filesystem(mockfs):
    path = "mock://dataset-train.arrow"
    with ArrowWriter(path=path, storage_options=mockfs.storage_options) as writer:
        assert isinstance(writer._fs, type(mockfs))
        assert writer._fs.storage_options == mockfs.storage_options
        writer.write({"col_1": "foo", "col_2": 1})
        writer.write({"col_1": "bar", "col_2": 2})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    assert mockfs.exists(path)


def test_parquet_writer_write():
    output = pa.BufferOutputStream()
    with ParquetWriter(stream=output) as writer:
        writer.write({"col_1": "foo", "col_2": 1})
        writer.write({"col_1": "bar", "col_2": 2})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    stream = pa.BufferReader(output.getvalue())
    pa_table: pa.Table = pq.read_table(stream)
    assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}


@require_pil
@pytest.mark.parametrize("embed_local_files", [False, True])
def test_writer_embed_local_files(tmp_path, embed_local_files):
    import PIL.Image

    image_path = str(tmp_path / "test_image_rgb.jpg")
    PIL.Image.fromarray(np.zeros((5, 5), dtype=np.uint8)).save(image_path, format="png")
    output = pa.BufferOutputStream()
    with ParquetWriter(
        stream=output, features=Features({"image": Image()}), embed_local_files=embed_local_files
    ) as writer:
        writer.write({"image": image_path})
        writer.finalize()
    stream = pa.BufferReader(output.getvalue())
    pa_table: pa.Table = pq.read_table(stream)
    out = pa_table.to_pydict()
    if embed_local_files:
        assert isinstance(out["image"][0]["path"], str)
        with open(image_path, "rb") as f:
            assert out["image"][0]["bytes"] == f.read()
    else:
        assert out["image"][0]["path"] == image_path
        assert out["image"][0]["bytes"] is None


def test_always_nullable():
    non_nullable_schema = pa.schema([pa.field("col_1", pa.string(), nullable=False)])

    output = pa.BufferOutputStream()
    with ArrowWriter(stream=output) as writer:
        writer._build_writer(inferred_schema=non_nullable_schema)
    assert writer._schema == pa.schema([pa.field("col_1", pa.string())])
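# A condensed end-to-end sketch of the writer API exercised above, writing two rows to an
# in-memory Arrow stream and reading them back:
#
#     output = pa.BufferOutputStream()
#     with ArrowWriter(stream=output) as writer:
#         writer.write({"col_1": "foo", "col_2": 1})
#         writer.write({"col_1": "bar", "col_2": 2})
#         num_examples, num_bytes = writer.finalize()
#     table = pa.ipc.open_stream(pa.BufferReader(output.getvalue())).read_all()
#     assert table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}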
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_wavlm": ["WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "WavLMConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_wavlm"] = [
        "WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "WavLMForAudioFrameClassification",
        "WavLMForCTC",
        "WavLMForSequenceClassification",
        "WavLMForXVector",
        "WavLMModel",
        "WavLMPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavlm import (
WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
WavLMForAudioFrameClassification,
WavLMForCTC,
WavLMForSequenceClassification,
WavLMForXVector,
WavLMModel,
WavLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformeraDModel
from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD
class _SCREAMING_SNAKE_CASE( nn.Module ):
SCREAMING_SNAKE_CASE_ : int
SCREAMING_SNAKE_CASE_ : int
SCREAMING_SNAKE_CASE_ : float = 0.0
SCREAMING_SNAKE_CASE_ : int = 1
SCREAMING_SNAKE_CASE_ : int = 1
SCREAMING_SNAKE_CASE_ : bool = True
SCREAMING_SNAKE_CASE_ : bool = False
SCREAMING_SNAKE_CASE_ : bool = False
SCREAMING_SNAKE_CASE_ : bool = False
SCREAMING_SNAKE_CASE_ : jnp.dtype = jnp.floataa
def _UpperCamelCase ( self ) -> Dict:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :Any = []
__SCREAMING_SNAKE_CASE :str = []
for i in range(self.num_layers ):
__SCREAMING_SNAKE_CASE :Optional[Any] = self.in_channels if i == 0 else self.out_channels
__SCREAMING_SNAKE_CASE :int = FlaxResnetBlockaD(
in_channels=lowerCAmelCase__ ,out_channels=self.out_channels ,dropout_prob=self.dropout ,dtype=self.dtype ,)
resnets.append(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE :int = FlaxTransformeraDModel(
in_channels=self.out_channels ,n_heads=self.num_attention_heads ,d_head=self.out_channels // self.num_attention_heads ,depth=1 ,use_linear_projection=self.use_linear_projection ,only_cross_attention=self.only_cross_attention ,use_memory_efficient_attention=self.use_memory_efficient_attention ,dtype=self.dtype ,)
attentions.append(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE :Optional[Any] = resnets
__SCREAMING_SNAKE_CASE :str = attentions
if self.add_downsample:
__SCREAMING_SNAKE_CASE :List[Any] = FlaxDownsampleaD(self.out_channels ,dtype=self.dtype )
def __call__( self ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__=True ) -> Dict:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :Optional[Any] = ()
for resnet, attn in zip(self.resnets ,self.attentions ):
__SCREAMING_SNAKE_CASE :int = resnet(lowerCAmelCase__ ,lowerCAmelCase__ ,deterministic=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE :str = attn(lowerCAmelCase__ ,lowerCAmelCase__ ,deterministic=lowerCAmelCase__ )
output_states += (hidden_states,)
if self.add_downsample:
__SCREAMING_SNAKE_CASE :Optional[Any] = self.downsamplers_a(lowerCAmelCase__ )
output_states += (hidden_states,)
return hidden_states, output_states
class _SCREAMING_SNAKE_CASE( nn.Module ):
SCREAMING_SNAKE_CASE_ : int
SCREAMING_SNAKE_CASE_ : int
SCREAMING_SNAKE_CASE_ : float = 0.0
SCREAMING_SNAKE_CASE_ : int = 1
SCREAMING_SNAKE_CASE_ : bool = True
SCREAMING_SNAKE_CASE_ : jnp.dtype = jnp.floataa
def _UpperCamelCase ( self ) -> Optional[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :List[str] = []
for i in range(self.num_layers ):
__SCREAMING_SNAKE_CASE :Optional[int] = self.in_channels if i == 0 else self.out_channels
__SCREAMING_SNAKE_CASE :Union[str, Any] = FlaxResnetBlockaD(
in_channels=lowerCAmelCase__ ,out_channels=self.out_channels ,dropout_prob=self.dropout ,dtype=self.dtype ,)
resnets.append(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE :Union[str, Any] = resnets
if self.add_downsample:
__SCREAMING_SNAKE_CASE :Union[str, Any] = FlaxDownsampleaD(self.out_channels ,dtype=self.dtype )
def __call__( self ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__=True ) -> Any:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :int = ()
for resnet in self.resnets:
__SCREAMING_SNAKE_CASE :Tuple = resnet(lowerCAmelCase__ ,lowerCAmelCase__ ,deterministic=lowerCAmelCase__ )
output_states += (hidden_states,)
if self.add_downsample:
__SCREAMING_SNAKE_CASE :Any = self.downsamplers_a(lowerCAmelCase__ )
output_states += (hidden_states,)
return hidden_states, output_states
class _SCREAMING_SNAKE_CASE( nn.Module ):
SCREAMING_SNAKE_CASE_ : int
SCREAMING_SNAKE_CASE_ : int
SCREAMING_SNAKE_CASE_ : int
SCREAMING_SNAKE_CASE_ : float = 0.0
SCREAMING_SNAKE_CASE_ : int = 1
SCREAMING_SNAKE_CASE_ : int = 1
SCREAMING_SNAKE_CASE_ : bool = True
SCREAMING_SNAKE_CASE_ : bool = False
SCREAMING_SNAKE_CASE_ : bool = False
SCREAMING_SNAKE_CASE_ : bool = False
SCREAMING_SNAKE_CASE_ : jnp.dtype = jnp.floataa
def _UpperCamelCase ( self ) -> str:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :int = []
__SCREAMING_SNAKE_CASE :List[Any] = []
for i in range(self.num_layers ):
__SCREAMING_SNAKE_CASE :int = self.in_channels if (i == self.num_layers - 1) else self.out_channels
__SCREAMING_SNAKE_CASE :List[str] = self.prev_output_channel if i == 0 else self.out_channels
__SCREAMING_SNAKE_CASE :Tuple = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels ,out_channels=self.out_channels ,dropout_prob=self.dropout ,dtype=self.dtype ,)
resnets.append(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE :Tuple = FlaxTransformeraDModel(
in_channels=self.out_channels ,n_heads=self.num_attention_heads ,d_head=self.out_channels // self.num_attention_heads ,depth=1 ,use_linear_projection=self.use_linear_projection ,only_cross_attention=self.only_cross_attention ,use_memory_efficient_attention=self.use_memory_efficient_attention ,dtype=self.dtype ,)
attentions.append(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE :int = resnets
__SCREAMING_SNAKE_CASE :Union[str, Any] = attentions
if self.add_upsample:
__SCREAMING_SNAKE_CASE :str = FlaxUpsampleaD(self.out_channels ,dtype=self.dtype )
def __call__( self ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__=True ) -> Optional[int]:
"""simple docstring"""
for resnet, attn in zip(self.resnets ,self.attentions ):
# pop res hidden states
__SCREAMING_SNAKE_CASE :Tuple = res_hidden_states_tuple[-1]
__SCREAMING_SNAKE_CASE :int = res_hidden_states_tuple[:-1]
__SCREAMING_SNAKE_CASE :Tuple = jnp.concatenate((hidden_states, res_hidden_states) ,axis=-1 )
__SCREAMING_SNAKE_CASE :Tuple = resnet(lowerCAmelCase__ ,lowerCAmelCase__ ,deterministic=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE :List[Any] = attn(lowerCAmelCase__ ,lowerCAmelCase__ ,deterministic=lowerCAmelCase__ )
if self.add_upsample:
__SCREAMING_SNAKE_CASE :str = self.upsamplers_a(lowerCAmelCase__ )
return hidden_states
class FlaxUpBlockaD(nn.Module):
    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    add_upsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []

        for i in range(self.num_layers):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels

            res_block = FlaxResnetBlockaD(
                in_channels=resnet_in_channels + res_skip_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

        self.resnets = resnets

        if self.add_upsample:
            self.upsamplers_a = FlaxUpsampleaD(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, res_hidden_states_tuple, temb, deterministic=True):
        for resnet in self.resnets:
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)

            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)

        if self.add_upsample:
            hidden_states = self.upsamplers_a(hidden_states)

        return hidden_states
class FlaxUNetMidBlockaDCrossAttn(nn.Module):
    in_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    use_linear_projection: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        # there is always at least one resnet
        resnets = [
            FlaxResnetBlockaD(
                in_channels=self.in_channels,
                out_channels=self.in_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
        ]

        attentions = []

        for _ in range(self.num_layers):
            attn_block = FlaxTransformeraDModel(
                in_channels=self.in_channels,
                n_heads=self.num_attention_heads,
                d_head=self.in_channels // self.num_attention_heads,
                depth=1,
                use_linear_projection=self.use_linear_projection,
                use_memory_efficient_attention=self.use_memory_efficient_attention,
                dtype=self.dtype,
            )
            attentions.append(attn_block)

            res_block = FlaxResnetBlockaD(
                in_channels=self.in_channels,
                out_channels=self.in_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

        self.resnets = resnets
        self.attentions = attentions

    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        hidden_states = self.resnets[0](hidden_states, temb)
        for attn, resnet in zip(self.attentions, self.resnets[1:]):
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)

        return hidden_states | 191 |
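The up blocks above consume one residual skip tensor per resnet, popping from the end of res_hidden_states_tuple and concatenating along the channel (last) axis before the resnet runs. A minimal NumPy sketch of that bookkeeping, framework-free; the shapes and the channel-restoring slice are illustrative assumptions, not taken from the file:

import numpy as np

def upblock_skip_demo(hidden_states, res_hidden_states_tuple, num_resnets=3):
    # Each step pops one skip tensor and widens the channel dim by concatenation.
    for _ in range(num_resnets):
        res_hidden_states = res_hidden_states_tuple[-1]
        res_hidden_states_tuple = res_hidden_states_tuple[:-1]
        hidden_states = np.concatenate((hidden_states, res_hidden_states), axis=-1)
        # A real block would apply a resnet that maps channels back down;
        # we fake that projection with a slice.
        hidden_states = hidden_states[..., : res_hidden_states.shape[-1]]
    return hidden_states

x = np.zeros((1, 8, 8, 4))
skips = tuple(np.zeros((1, 8, 8, 4)) for _ in range(3))
print(upblock_skip_demo(x, skips).shape)  # (1, 8, 8, 4)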
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
    Text2TextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class Text2TextGenerationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        generator = Text2TextGenerationPipeline(model=model, tokenizer=tokenizer)
        return generator, ["Something to write", "Something else"]

    def run_pipeline_test(self, generator, _):
        outputs = generator("Something there")
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        # These are encoder decoder, they don't just append to incoming string
        self.assertFalse(outputs[0]["generated_text"].startswith("Something there"))

        outputs = generator(["This is great !", "Something else"], num_return_sequences=2, do_sample=True)
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )

        outputs = generator(
            ["This is great !", "Something else"], num_return_sequences=2, batch_size=2, do_sample=True
        )
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )

        with self.assertRaises(ValueError):
            generator(4)

    @require_torch
    def test_small_model_pt(self):
        generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random", framework="pt")
        # do_sample=False necessary for reproducibility
        outputs = generator("Something there", do_sample=False)
        self.assertEqual(outputs, [{"generated_text": ""}])

        num_return_sequences = 3
        outputs = generator(
            "Something there",
            num_return_sequences=num_return_sequences,
            num_beams=num_return_sequences,
        )
        target_outputs = [
            {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide Beide"},
            {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide"},
            {"generated_text": ""},
        ]
        self.assertEqual(outputs, target_outputs)

        outputs = generator("This is a test", do_sample=True, num_return_sequences=2, return_tensors=True)
        self.assertEqual(
            outputs,
            [
                {"generated_token_ids": ANY(torch.Tensor)},
                {"generated_token_ids": ANY(torch.Tensor)},
            ],
        )

        generator.tokenizer.pad_token_id = generator.model.config.eos_token_id
        generator.tokenizer.pad_token = "<pad>"
        outputs = generator(
            ["This is a test", "This is a second test"],
            do_sample=True,
            num_return_sequences=2,
            batch_size=2,
            return_tensors=True,
        )
        self.assertEqual(
            outputs,
            [
                [
                    {"generated_token_ids": ANY(torch.Tensor)},
                    {"generated_token_ids": ANY(torch.Tensor)},
                ],
                [
                    {"generated_token_ids": ANY(torch.Tensor)},
                    {"generated_token_ids": ANY(torch.Tensor)},
                ],
            ],
        )

    @require_tf
    def test_small_model_tf(self):
        generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random", framework="tf")
        # do_sample=False necessary for reproducibility
        outputs = generator("Something there", do_sample=False)
        self.assertEqual(outputs, [{"generated_text": ""}])
| 13 | 0 |
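A quick interactive counterpart to the beam-search part of the test above (requires network access to download the tiny checkpoint, and the generated text will be gibberish because the model is randomly initialized):

from transformers import pipeline

generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random")
# Beam search can return several distinct candidates: num_return_sequences <= num_beams.
outputs = generator("Something there", num_beams=3, num_return_sequences=3)
print(len(outputs))  # 3 dicts, each with a "generated_text" key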
from __future__ import annotations
def bucket_sort(my_list: list) -> list:
    if len(my_list) == 0:
        return []
    min_value, max_value = min(my_list), max(my_list)
    bucket_count = int(max_value - min_value) + 1
    buckets: list[list] = [[] for _ in range(bucket_count)]

    for i in my_list:
        buckets[int(i - min_value)].append(i)

    return [v for bucket in buckets for v in sorted(bucket)]


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
    assert bucket_sort([0, 1, -10, 15, 2, -2]) == [-10, -2, 0, 1, 2, 15]
| 26 |
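For contrast, a sketch of the classic fixed-bucket variant for floats in [0, 1); the function name and default bucket count are illustrative, not part of the file above:

def bucket_sort_unit_interval(values: list[float], bucket_count: int = 10) -> list[float]:
    buckets: list[list[float]] = [[] for _ in range(bucket_count)]
    for v in values:
        # clamp so v == 1.0 (if it ever appears) still lands in the last bucket
        buckets[min(int(v * bucket_count), bucket_count - 1)].append(v)
    return [v for bucket in buckets for v in sorted(bucket)]

data = [0.42, 0.32, 0.33, 0.52, 0.37, 0.47, 0.51]
assert bucket_sort_unit_interval(data) == sorted(data)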
def topological_sort(graph):
    """Kahn's algorithm: repeatedly remove vertices with indegree zero."""
    indegree = [0] * len(graph)
    queue = []
    topo = []
    cnt = 0

    for values in graph.values():
        for i in values:
            indegree[i] += 1

    for i in range(len(indegree)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        cnt += 1
        topo.append(vertex)
        for x in graph[vertex]:
            indegree[x] -= 1
            if indegree[x] == 0:
                queue.append(x)

    if cnt != len(graph):
        print("Cycle exists")
    else:
        print(topo)


# Adjacency List of Graph
graph = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
| 13 | 0 |
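The same Kahn's-algorithm idea, reworked as a sketch that returns the ordering and raises on a cycle instead of printing; the function name and error type are assumptions for illustration:

def topological_order(graph: dict[int, list[int]]) -> list[int]:
    indegree = {u: 0 for u in graph}
    for targets in graph.values():
        for v in targets:
            indegree[v] += 1
    queue = [u for u, d in indegree.items() if d == 0]
    order = []
    while queue:
        u = queue.pop(0)
        order.append(u)
        for v in graph[u]:
            indegree[v] -= 1
            if indegree[v] == 0:
                queue.append(v)
    if len(order) != len(graph):
        raise ValueError("graph contains a cycle")
    return order

print(topological_order({0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}))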
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def check_model_parameters(model_a, model_b, did_step, iteration):
    for param, grad_param in zip(model_a.parameters(), model_b.parameters()):
        if not param.requires_grad:
            continue
        if not did_step:
            # Grads should not be in sync
            assert (
                torch.allclose(param.grad, grad_param.grad) is False
            ), f"""Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})"""
        else:
            # Grads should be in sync
            assert (
                torch.allclose(param.grad, grad_param.grad) is True
            ), f"""Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})"""


def step_model(model, input, target, accelerator, do_backward=True):
    model.train()
    output = model(input)
    loss = F.mse_loss(output, target.to(output.device))
    if not do_backward:
        loss /= accelerator.gradient_accumulation_steps
        loss.backward()
    else:
        accelerator.backward(loss)


def get_training_setup(accelerator, sched=False):
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=80)
    dataloader = DataLoader(dset, batch_size=16)
    model.to(accelerator.device)
    if sched:
        opt = AdamW(params=model.parameters(), lr=1e-3)
        ddp_opt = AdamW(params=ddp_model.parameters(), lr=1e-3)
        sched = LambdaLR(opt, lr_lambda=lambda epoch: epoch**0.65)
        ddp_sched = LambdaLR(ddp_opt, lr_lambda=lambda epoch: epoch**0.65)
    # Make a copy of `model`
    if sched:
        ddp_model, ddp_opt, ddp_sched, dataloader = accelerator.prepare(ddp_model, ddp_opt, ddp_sched, dataloader)
    else:
        ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    if sched:
        return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
    return model, ddp_model, dataloader
def test_noop_sync(accelerator):
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
        check_model_parameters(model, ddp_model, True, iteration)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            assert torch.allclose(
                param.grad, ddp_param.grad
            ), f"""Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"""

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]


def test_distributed_sync(accelerator):
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if iteration % 2 == 0:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"""Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"""
            else:
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"""Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"""

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
def test_gradient_accumulation(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator, False)
        # Do "gradient accumulation" (noop)
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if ((iteration + 1) % 2 == 0) or (iteration == len(dataloader) - 1):
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"""Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"""
            else:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"""Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"""

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
    GradientState._reset_state()


def test_gradient_accumulation_with_opt_and_scheduler(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched = get_training_setup(accelerator, True)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        model.train()
        ddp_model.train()
        step_model(model, input, target, accelerator, False)
        opt.step()

        if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(dataloader)):
            if split_batches:
                sched.step()
            else:
                for _ in range(accelerator.num_processes):
                    sched.step()
        opt.zero_grad()
        # Perform gradient accumulation under wrapper
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
            ddp_opt.step()
            ddp_sched.step()
            ddp_opt.zero_grad()

        # Learning rates should be the same
        assert (
            opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
        ), f"""Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]['lr']}\nDDP opt: {ddp_opt.param_groups[0]['lr']}\n"""
        did_step = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(dataloader))
        if accelerator.num_processes > 1:
            check_model_parameters(model, ddp_model, did_step, iteration)
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
    GradientState._reset_state()
def test_dataloader_break():
    accelerator = Accelerator()

    first_dset = RegressionDataset(length=80)
    first_dataloader = DataLoader(first_dset, batch_size=16)
    second_dset = RegressionDataset(length=96)
    second_dataloader = DataLoader(second_dset, batch_size=16)
    first_dataloader, second_dataloader = accelerator.prepare(first_dataloader, second_dataloader)
    assert accelerator.gradient_state.active_dataloader is None
    for iteration, _ in enumerate(first_dataloader):
        assert id(accelerator.gradient_state.active_dataloader) == id(first_dataloader)
        if iteration < len(first_dataloader) - 1:
            assert not accelerator.gradient_state.end_of_dataloader
            if iteration == 1:
                for batch_num, _ in enumerate(second_dataloader):
                    assert id(accelerator.gradient_state.active_dataloader) == id(second_dataloader)
                    if batch_num < len(second_dataloader) - 1:
                        assert not accelerator.gradient_state.end_of_dataloader
                    else:
                        assert accelerator.gradient_state.end_of_dataloader
        else:
            assert accelerator.gradient_state.end_of_dataloader
    assert accelerator.gradient_state.active_dataloader is None


def main():
    accelerator = Accelerator()
    state = accelerator.state
    if state.local_process_index == 0:
        print("**Test `accumulate` gradient accumulation with dataloader break**")
    test_dataloader_break()
    if state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print("**Test NOOP `no_sync` context manager**")
        test_noop_sync(accelerator)
    if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
        if state.local_process_index == 0:
            print("**Test Distributed `no_sync` context manager**")
        test_distributed_sync(accelerator)
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation, ",
                        f"""`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**""",
                    )
                test_gradient_accumulation(split_batch, dispatch_batches)

    # Currently will break on torch 2.0 +, need to investigate why
    if is_torch_version("<", "2.0") or state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print(
                "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                "`split_batches=False`, `dispatch_batches=False`**",
            )
        test_gradient_accumulation_with_opt_and_scheduler()
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if not split_batch and not dispatch_batches:
                    continue
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                        f"""`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**""",
                    )
                test_gradient_accumulation_with_opt_and_scheduler(split_batch, dispatch_batches)


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 283 |
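A minimal, self-contained sketch of the `accumulate` pattern the tests above exercise; the toy model, optimizer, and data below are placeholders, not taken from the test file:

import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator

accelerator = Accelerator(gradient_accumulation_steps=2)
model = torch.nn.Linear(1, 1)
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
data = TensorDataset(torch.randn(32, 1), torch.randn(32, 1))
dataloader = DataLoader(data, batch_size=4)
model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)

for x, y in dataloader:
    # `accumulate` skips the gradient sync/step on all but every
    # `gradient_accumulation_steps`-th batch (and the final one).
    with accelerator.accumulate(model):
        loss = torch.nn.functional.mse_loss(model(x), y)
        accelerator.backward(loss)
        optimizer.step()
        optimizer.zero_grad()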
import argparse
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator, batch_size=16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size, drop_last=True
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"],
        shuffle=False,
        collate_fn=collate_fn,
        batch_size=EVAL_BATCH_SIZE,
        drop_last=(accelerator.mixed_precision == "fp8"),
    )

    return train_dataloader, eval_dataloader


def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
| 13 | 0 |
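The batch-size/gradient-accumulation arithmetic used in training_function above, as a standalone sketch (the helper name is hypothetical):

MAX_GPU_BATCH_SIZE = 16

def split_batch(batch_size: int) -> tuple[int, int]:
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE
    return batch_size, gradient_accumulation_steps

assert split_batch(64) == (16, 4)  # 4 micro-batches of 16 stand in for one batch of 64
assert split_batch(16) == (16, 1)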
'''simple docstring'''
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(__file__), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="""ignore""", category=FutureWarning)
def pytest_configure(config):
    config.addinivalue_line(
        "markers", "is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested"
    )
    config.addinivalue_line(
        "markers", "is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested"
    )
    config.addinivalue_line("markers", "is_pipeline_test: mark test to run only when pipelines are tested")
    config.addinivalue_line("markers", "is_staging_test: mark test to run only in the staging environment")
    config.addinivalue_line("markers", "accelerate_tests: mark test that require accelerate")
    config.addinivalue_line("markers", "tool_tests: mark the tool tests that are run on their specific schedule")


def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)


def pytest_sessionfinish(session, exitstatus):
    # If no tests are collected, pytest exits with code 5, which makes the CI fail.
    if exitstatus == 5:
        session.exitstatus = 0


# Doctest custom flag to ignore output.
IGNORE_RESULT = doctest.register_optionflag("IGNORE_RESULT")

OutputChecker = doctest.OutputChecker


class CustomOutputChecker(OutputChecker):
    def check_output(self, want, got, optionflags):
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self, want, got, optionflags)


doctest.OutputChecker = CustomOutputChecker
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser
| 166 |
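How a custom doctest option flag of the kind registered above behaves, reduced to an isolated sketch with made-up names:

import doctest

SKIP_CHECK = doctest.register_optionflag("SKIP_CHECK")

class LenientChecker(doctest.OutputChecker):
    def check_output(self, want, got, optionflags):
        if SKIP_CHECK & optionflags:
            return True  # accept any output when the flag is set
        return super().check_output(want, got, optionflags)

checker = LenientChecker()
assert checker.check_output("1\n", "2\n", SKIP_CHECK)  # flag set: passes
assert not checker.check_output("1\n", "2\n", 0)       # flag unset: fails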
from collections.abc import Callable
class Heap:
    """A generic heap supporting updates and deletions via a position map."""

    def __init__(self, key: Callable | None = None):
        # Stores actual heap items.
        self.arr: list = []
        # Stores indexes of each item for supporting updates and deletion.
        self.pos_map: dict = {}
        # Stores current size of heap.
        self.size = 0
        # Stores function used to evaluate the score of an item on which basis ordering
        # will be done.
        self.key = key or (lambda x: x)

    def _parent(self, i: int) -> int | None:
        return int((i - 1) / 2) if i > 0 else None

    def _left(self, i: int) -> int | None:
        left = int(2 * i + 1)
        return left if 0 < left < self.size else None

    def _right(self, i: int) -> int | None:
        right = int(2 * i + 2)
        return right if 0 < right < self.size else None

    def _swap(self, i: int, j: int) -> None:
        # First update the indexes of the items in index map.
        self.pos_map[self.arr[i][0]], self.pos_map[self.arr[j][0]] = (
            self.pos_map[self.arr[j][0]],
            self.pos_map[self.arr[i][0]],
        )
        # Then swap the items in the list.
        self.arr[i], self.arr[j] = self.arr[j], self.arr[i]

    def _cmp(self, i: int, j: int) -> bool:
        return self.arr[i][1] < self.arr[j][1]

    def _get_valid_parent(self, i: int) -> int:
        left = self._left(i)
        right = self._right(i)
        valid_parent = i

        if left is not None and not self._cmp(left, valid_parent):
            valid_parent = left
        if right is not None and not self._cmp(right, valid_parent):
            valid_parent = right

        return valid_parent

    def _heapify_up(self, index: int) -> None:
        parent = self._parent(index)
        while parent is not None and not self._cmp(index, parent):
            self._swap(index, parent)
            index, parent = parent, self._parent(parent)

    def _heapify_down(self, index: int) -> None:
        valid_parent = self._get_valid_parent(index)
        while valid_parent != index:
            self._swap(index, valid_parent)
            index, valid_parent = valid_parent, self._get_valid_parent(valid_parent)

    def update_item(self, item: int, item_value: int) -> None:
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        self.arr[index] = [item, self.key(item_value)]
        # Make sure heap is right in both up and down direction.
        # Ideally only one of them will make any change.
        self._heapify_up(index)
        self._heapify_down(index)

    def delete_item(self, item: int) -> None:
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        del self.pos_map[item]
        self.arr[index] = self.arr[self.size - 1]
        self.pos_map[self.arr[self.size - 1][0]] = index
        self.size -= 1
        # Make sure heap is right in both up and down direction. Ideally only one
        # of them will make any change- so no performance loss in calling both.
        if self.size > index:
            self._heapify_up(index)
            self._heapify_down(index)

    def insert_item(self, item: int, item_value: int) -> None:
        arr_len = len(self.arr)
        if arr_len == self.size:
            self.arr.append([item, self.key(item_value)])
        else:
            self.arr[self.size] = [item, self.key(item_value)]
        self.pos_map[item] = self.size
        self.size += 1
        self._heapify_up(self.size - 1)

    def get_top(self):
        return self.arr[0] if self.size else None

    def extract_top(self):
        top_item_tuple = self.get_top()
        if top_item_tuple:
            self.delete_item(top_item_tuple[0])
        return top_item_tuple


def test_heap() -> None:
    pass


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 13 | 0 |
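A usage sketch for the Heap above (assuming the cleaned-up class as given): with the default identity key it behaves as a max-heap on the item values, and a negating key flips it to a min-heap.

heap = Heap()
heap.insert_item(5, 34)
heap.insert_item(6, 31)
heap.insert_item(7, 37)
print(heap.get_top())      # [7, 37] - largest value wins with the identity key
heap.update_item(6, 40)
print(heap.extract_top())  # [6, 40]

min_heap = Heap(key=lambda value: -value)  # negate scores for min-heap behavior
min_heap.insert_item(1, 9)
min_heap.insert_item(2, 4)
print(min_heap.get_top()[0])  # 2, the item holding the smallest value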
import dataclasses
import re
import string
from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple
import numpy as np
from . import residue_constants
FeatureDict = Mapping[str, np.ndarray]
ModelOutput = Mapping[str, Any]  # Is a nested dict.
PICO_TO_ANGSTROM = 0.01


@dataclasses.dataclass(frozen=True)
class Protein:
    """Protein structure representation."""

    # Cartesian coordinates of atoms in angstroms.
    atom_positions: np.ndarray  # [num_res, num_atom_type, 3]

    # Amino-acid type for each residue represented as an integer between 0 and
    # 20, where 20 is 'X'.
    aatype: np.ndarray  # [num_res]

    # Binary float mask to indicate presence of a particular atom. 1.0 if an atom
    # is present and 0.0 if not. This should be used for loss masking.
    atom_mask: np.ndarray  # [num_res, num_atom_type]

    # Residue index as used in PDB. It is not necessarily continuous or 0-indexed.
    residue_index: np.ndarray  # [num_res]

    # B-factors, or temperature factors, of each residue (in sq. angstroms units),
    # representing the displacement of the residue from its ground truth mean
    # value.
    b_factors: np.ndarray  # [num_res, num_atom_type]

    # Chain indices for multi-chain predictions
    chain_index: Optional[np.ndarray] = None

    # Optional remark about the protein. Included as a comment in output PDB
    # files
    remark: Optional[str] = None

    # Templates used to generate this protein (prediction-only)
    parents: Optional[Sequence[str]] = None

    # Chain corresponding to each parent
    parents_chain_index: Optional[Sequence[int]] = None
def from_proteinnet_string(proteinnet_str: str) -> Protein:
    tag_re = r"(\[[A-Z]+\]\n)"
    tags = [tag.strip() for tag in re.split(tag_re, proteinnet_str) if len(tag) > 0]
    groups = zip(tags[0::2], [l.split("\n") for l in tags[1::2]])

    atoms = ["N", "CA", "C"]
    aatype = None
    atom_positions = None
    atom_mask = None
    for g in groups:
        if "[PRIMARY]" == g[0]:
            seq = g[1][0].strip()
            for i in range(len(seq)):
                if seq[i] not in residue_constants.restypes:
                    seq[i] = "X"  # FIXME: strings are immutable
            aatype = np.array(
                [residue_constants.restype_order.get(res_symbol, residue_constants.restype_num) for res_symbol in seq]
            )
        elif "[TERTIARY]" == g[0]:
            tertiary = []
            for axis in range(3):
                tertiary.append(list(map(float, g[1][axis].split())))
            tertiary_np = np.array(tertiary)
            atom_positions = np.zeros((len(tertiary[0]) // 3, residue_constants.atom_type_num, 3)).astype(np.float32)
            for i, atom in enumerate(atoms):
                atom_positions[:, residue_constants.atom_order[atom], :] = np.transpose(tertiary_np[:, i::3])
            atom_positions *= PICO_TO_ANGSTROM
        elif "[MASK]" == g[0]:
            mask = np.array(list(map({"-": 0, "+": 1}.get, g[1][0].strip())))
            atom_mask = np.zeros(
                (
                    len(mask),
                    residue_constants.atom_type_num,
                )
            ).astype(np.float32)
            for i, atom in enumerate(atoms):
                atom_mask[:, residue_constants.atom_order[atom]] = 1
            atom_mask *= mask[..., None]

    assert aatype is not None

    return Protein(
        atom_positions=atom_positions,
        atom_mask=atom_mask,
        aatype=aatype,
        residue_index=np.arange(len(aatype)),
        b_factors=None,
    )
def get_pdb_headers(prot: Protein, chain_id: int = 0) -> List[str]:
    pdb_headers: List[str] = []

    remark = prot.remark
    if remark is not None:
        pdb_headers.append(f"REMARK {remark}")

    parents = prot.parents
    parents_chain_index = prot.parents_chain_index
    if parents is not None and parents_chain_index is not None:
        parents = [p for i, p in zip(parents_chain_index, parents) if i == chain_id]

    if parents is None or len(parents) == 0:
        parents = ["N/A"]

    pdb_headers.append(f"PARENT {' '.join(parents)}")

    return pdb_headers
def add_pdb_headers(prot: Protein, pdb_str: str) -> str:
    out_pdb_lines: List[str] = []
    lines = pdb_str.split("\n")

    remark = prot.remark
    if remark is not None:
        out_pdb_lines.append(f"REMARK {remark}")

    parents_per_chain: List[List[str]]
    if prot.parents is not None and len(prot.parents) > 0:
        parents_per_chain = []
        if prot.parents_chain_index is not None:
            parent_dict: Dict[str, List[str]] = {}
            for p, i in zip(prot.parents, prot.parents_chain_index):
                parent_dict.setdefault(str(i), [])
                parent_dict[str(i)].append(p)

            max_idx = max([int(chain_idx) for chain_idx in parent_dict])
            for i in range(max_idx + 1):
                chain_parents = parent_dict.get(str(i), ["N/A"])
                parents_per_chain.append(chain_parents)
        else:
            parents_per_chain.append(list(prot.parents))
    else:
        parents_per_chain = [["N/A"]]

    def make_parent_line(p: Sequence[str]) -> str:
        return f"PARENT {' '.join(p)}"

    out_pdb_lines.append(make_parent_line(parents_per_chain[0]))

    chain_counter = 0
    for i, l in enumerate(lines):
        if "PARENT" not in l and "REMARK" not in l:
            out_pdb_lines.append(l)
        if "TER" in l and "END" not in lines[i + 1]:
            chain_counter += 1
            if not chain_counter >= len(parents_per_chain):
                chain_parents = parents_per_chain[chain_counter]
            else:
                chain_parents = ["N/A"]
            out_pdb_lines.append(make_parent_line(chain_parents))

    return "\n".join(out_pdb_lines)
def to_pdb(prot: Protein) -> str:
    restypes = residue_constants.restypes + ["X"]

    def res_1to3(r: int) -> str:
        return residue_constants.restype_1to3.get(restypes[r], "UNK")

    atom_types = residue_constants.atom_types

    pdb_lines: List[str] = []

    atom_mask = prot.atom_mask
    aatype = prot.aatype
    atom_positions = prot.atom_positions
    residue_index = prot.residue_index.astype(np.int32)
    b_factors = prot.b_factors
    chain_index = prot.chain_index

    if np.any(aatype > residue_constants.restype_num):
        raise ValueError("Invalid aatypes.")

    headers = get_pdb_headers(prot)
    if len(headers) > 0:
        pdb_lines.extend(headers)

    n = aatype.shape[0]
    atom_index = 1
    prev_chain_index = 0
    chain_tags = string.ascii_uppercase
    chain_tag = None
    # Add all atom sites.
    for i in range(n):
        res_name_3 = res_1to3(aatype[i])
        for atom_name, pos, mask, b_factor in zip(atom_types, atom_positions[i], atom_mask[i], b_factors[i]):
            if mask < 0.5:
                continue

            record_type = "ATOM"
            name = atom_name if len(atom_name) == 4 else f" {atom_name}"
            alt_loc = ""
            insertion_code = ""
            occupancy = 1.00
            element = atom_name[0]  # Protein supports only C, N, O, S, this works.
            charge = ""

            chain_tag = "A"
            if chain_index is not None:
                chain_tag = chain_tags[chain_index[i]]

            # PDB is a columnar format, every space matters here!
            atom_line = (
                f"{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}"
                f"{res_name_3:>3} {chain_tag:>1}"
                f"{residue_index[i]:>4}{insertion_code:>1}   "
                f"{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}"
                f"{occupancy:>6.2f}{b_factor:>6.2f}          "
                f"{element:>2}{charge:>2}"
            )
            pdb_lines.append(atom_line)
            atom_index += 1

        should_terminate = i == n - 1
        if chain_index is not None:
            if i != n - 1 and chain_index[i + 1] != prev_chain_index:
                should_terminate = True
                prev_chain_index = chain_index[i + 1]

        if should_terminate:
            # Close the chain.
            chain_end = "TER"
            chain_termination_line = (
                f"{chain_end:<6}{atom_index:>5}      {res_1to3(aatype[i]):>3} {chain_tag:>1}{residue_index[i]:>4}"
            )
            pdb_lines.append(chain_termination_line)
            atom_index += 1

            if i != n - 1:
                # "prev" is a misnomer here. This happens at the beginning of
                # each new chain.
                pdb_lines.extend(get_pdb_headers(prot, prev_chain_index))

    pdb_lines.append("END")
    pdb_lines.append("")
    return "\n".join(pdb_lines)
def ideal_atom_mask(prot: Protein) -> np.ndarray:
    """Computes an ideal atom mask from the given amino-acid sequence."""
    return residue_constants.STANDARD_ATOM_MASK[prot.aatype]


def from_prediction(
    features: FeatureDict,
    result: ModelOutput,
    b_factors: Optional[np.ndarray] = None,
    chain_index: Optional[np.ndarray] = None,
    remark: Optional[str] = None,
    parents: Optional[Sequence[str]] = None,
    parents_chain_index: Optional[Sequence[int]] = None,
) -> Protein:
    return Protein(
        aatype=features["aatype"],
        atom_positions=result["final_atom_positions"],
        atom_mask=result["final_atom_mask"],
        residue_index=features["residue_index"] + 1,
        b_factors=b_factors if b_factors is not None else np.zeros_like(result["final_atom_mask"]),
        chain_index=chain_index,
        remark=remark,
        parents=parents,
        parents_chain_index=parents_chain_index,
    )
| 274 |
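A standalone sketch of the fixed-column ATOM record formatting used in to_pdb above; every field value below is made up, and the format string mirrors the one in the function:

record = (
    f"{'ATOM':<6}{1:>5} {' CA ':<4}{'':>1}"
    f"{'ALA':>3} {'A':>1}"
    f"{1:>4}{'':>1}   "
    f"{1.0:>8.3f}{2.0:>8.3f}{3.0:>8.3f}"
    f"{1.00:>6.2f}{0.00:>6.2f}          "
    f"{'C':>2}{'':>2}"
)
print(record)
print(len(record))  # the PDB ATOM record is a fixed-width (80-column) layout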
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
if is_sentencepiece_available():
    from ..t5.tokenization_t5 import T5Tokenizer
else:
    from ...utils.dummy_sentencepiece_objects import T5Tokenizer

MT5Tokenizer = T5Tokenizer

if is_tokenizers_available():
    from ..t5.tokenization_t5_fast import T5TokenizerFast
else:
    from ...utils.dummy_tokenizers_objects import T5TokenizerFast

MT5TokenizerFast = T5TokenizerFast

_import_structure = {"configuration_mt5": ["MT5Config", "MT5OnnxConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mt5"] = [
        "MT5EncoderModel",
        "MT5ForConditionalGeneration",
        "MT5ForQuestionAnswering",
        "MT5Model",
        "MT5PreTrainedModel",
        "MT5Stack",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_mt5"] = ["TFMT5EncoderModel", "TFMT5ForConditionalGeneration", "TFMT5Model"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_mt5"] = ["FlaxMT5EncoderModel", "FlaxMT5ForConditionalGeneration", "FlaxMT5Model"]

if TYPE_CHECKING:
    from .configuration_mt5 import MT5Config, MT5OnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mt5 import (
            MT5EncoderModel,
            MT5ForConditionalGeneration,
            MT5ForQuestionAnswering,
            MT5Model,
            MT5PreTrainedModel,
            MT5Stack,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_mt5 import TFMT5EncoderModel, TFMT5ForConditionalGeneration, TFMT5Model

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_mt5 import FlaxMT5EncoderModel, FlaxMT5ForConditionalGeneration, FlaxMT5Model

else:
    import sys

    sys.modules[__name__] = _LazyModule(
        __name__,
        globals()["__file__"],
        _import_structure,
        extra_objects={"MT5Tokenizer": MT5Tokenizer, "MT5TokenizerFast": MT5TokenizerFast},
        module_spec=__spec__,
    )
| 13 | 0 |
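A tiny illustration of the lazy-module pattern the file above relies on, independent of transformers; the class and variable names here are invented for the sketch:

import importlib
import types

class TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._class_to_module = {
            attr: module for module, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, name):
        # Only runs on the first access; the real module is imported on demand.
        if name not in self._class_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {name!r}")
        module = importlib.import_module(self._class_to_module[name])
        return getattr(module, name)

lazy = TinyLazyModule("lazy", {"json": ["dumps", "loads"]})
print(lazy.dumps({"a": 1}))  # json is imported only when dumps is first touched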
'''simple docstring'''
import logging
import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed
class ConstantLengthDataset(IterableDataset):
    def __init__(self, tokenizer, dataset, seq_length=1024, num_of_sequences=1024, chars_per_token=3.6):
        self.tokenizer = tokenizer
        self.concat_token_id = tokenizer.bos_token_id
        self.dataset = dataset
        self.seq_length = seq_length
        self.input_characters = seq_length * chars_per_token * num_of_sequences

    def __iter__(self):
        iterator = iter(self.dataset)
        more_examples = True
        while more_examples:
            buffer, buffer_len = [], 0
            while True:
                if buffer_len >= self.input_characters:
                    break
                try:
                    buffer.append(next(iterator)["content"])
                    buffer_len += len(buffer[-1])
                except StopIteration:
                    more_examples = False
                    break
            tokenized_inputs = self.tokenizer(buffer, truncation=False)["input_ids"]
            all_token_ids = []
            for tokenized_input in tokenized_inputs:
                all_token_ids.extend(tokenized_input + [self.concat_token_id])
            for i in range(0, len(all_token_ids), self.seq_length):
                input_ids = all_token_ids[i : i + self.seq_length]
                if len(input_ids) == self.seq_length:
                    yield torch.tensor(input_ids)


def create_dataloader(args):
    ds_kwargs = {"streaming": True}
    valid_data = load_dataset(args.dataset_name, split="train", **ds_kwargs)
    valid_dataset = ConstantLengthDataset(tokenizer, valid_data, seq_length=args.seq_length)
    eval_dataloader = DataLoader(valid_dataset, batch_size=args.batch_size)
    return eval_dataloader


def evaluate(args):
    model.eval()
    losses = []
    for step, batch in enumerate(eval_dataloader):
        with torch.no_grad():
            outputs = model(batch, labels=batch)
        loss = outputs.loss.repeat(args.batch_size)
        losses.append(accelerator.gather(loss))

        if args.max_eval_steps > 0 and step >= args.max_eval_steps:
            break
    loss = torch.mean(torch.cat(losses))
    try:
        perplexity = torch.exp(loss)
    except OverflowError:
        perplexity = float("inf")
    return loss.item(), perplexity.item()


# Setup Accelerator
accelerator = Accelerator()

# Parse configuration
parser = HfArgumentParser(EvaluationArguments)
args = parser.parse_args()
set_seed(args.seed)

# Logging
logger = logging.getLogger(__name__)
logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)

# Load model and tokenizer
model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)

# Load dataset and dataloader
eval_dataloader = create_dataloader(args)

# Prepare everything with our `accelerator`.
model, eval_dataloader = accelerator.prepare(model, eval_dataloader)

# Evaluate and save the last checkpoint
logger.info("Evaluating and saving model after training")
eval_loss, perplexity = evaluate(args)
logger.info(f"loss/eval: {eval_loss}, perplexity: {perplexity}")
| 311 |
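Perplexity as computed above is just exp of the mean token-level negative log-likelihood; a dependency-free sketch:

import math

def perplexity(token_nlls: list[float]) -> float:
    mean_nll = sum(token_nlls) / len(token_nlls)
    return math.exp(mean_nll)

print(perplexity([2.0, 2.0, 2.0]))  # ~7.39; lower is better
print(perplexity([0.0, 0.0, 0.0]))  # 1.0, a model that is always certain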
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class ShapEPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ShapEPipeline
    params = ["prompt"]
    batch_params = ["prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def renderer_dim(self):
        return 8

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModelWithProjection(config)

    @property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 16,
            "embedding_dim": self.time_input_dim,
            "num_embeddings": 32,
            "embedding_proj_dim": self.text_embedder_hidden_size,
            "time_embed_dim": self.time_embed_dim,
            "num_layers": 1,
            "clip_embed_dim": self.time_input_dim * 2,
            "additional_embeddings": 0,
            "time_embed_act_fn": "gelu",
            "norm_in_type": "layer",
            "encoder_hid_proj_type": None,
            "added_emb_type": None,
        }
        model = PriorTransformer(**model_kwargs)
        return model

    @property
    def dummy_renderer(self):
        torch.manual_seed(0)
        model_kwargs = {
            "param_shapes": (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            "d_latent": self.time_input_dim,
            "d_hidden": self.renderer_dim,
            "n_output": 12,
            "background": (
                0.1,
                0.1,
                0.1,
            ),
        }
        model = ShapERenderer(**model_kwargs)
        return model

    def get_dummy_components(self):
        prior = self.dummy_prior
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        renderer = self.dummy_renderer

        scheduler = HeunDiscreteScheduler(
            beta_schedule="exp",
            num_train_timesteps=1024,
            prediction_type="sample",
            use_karras_sigmas=True,
            clip_sample=True,
            clip_sample_range=1.0,
        )
        components = {
            "prior": prior,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "renderer": renderer,
            "scheduler": scheduler,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "generator": generator,
            "num_inference_steps": 1,
            "frame_size": 32,
            "output_type": "np",
        }
        return inputs

    def test_shap_e(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (20, 32, 32, 3)

        expected_slice = np.array(
            [
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
            ]
        )

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_consistent(self):
        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True

        self._test_inference_batch_single_identical(
            batch_size=2,
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
        )

    def test_num_images_per_prompt(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        batch_size = 1
        num_images_per_prompt = 2
        inputs = self.get_dummy_inputs(torch_device)

        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]

        images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]

        assert images.shape[0] == batch_size * num_images_per_prompt


@slow
@require_torch_gpu
class ShapEPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_shap_e(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/shap_e/test_shap_e_np_out.npy"
        )
        pipe = ShapEPipeline.from_pretrained("openai/shap-e")
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=torch_device).manual_seed(0)

        images = pipe(
            "a shark",
            generator=generator,
            guidance_scale=15.0,
            num_inference_steps=64,
            frame_size=64,
            output_type="np",
        ).images[0]

        assert images.shape == (20, 64, 64, 3)

        assert_mean_pixel_difference(images, expected_image)
| 13 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_torch_available,
)
UpperCamelCase__ = {
"""configuration_speecht5""": [
"""SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP""",
"""SpeechT5Config""",
"""SpeechT5HifiGanConfig""",
],
"""feature_extraction_speecht5""": ["""SpeechT5FeatureExtractor"""],
"""processing_speecht5""": ["""SpeechT5Processor"""],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    UpperCamelCase__["""tokenization_speecht5"""] = ["""SpeechT5Tokenizer"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    UpperCamelCase__["""modeling_speecht5"""] = [
        """SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """SpeechT5ForSpeechToText""",
        """SpeechT5ForSpeechToSpeech""",
        """SpeechT5ForTextToSpeech""",
        """SpeechT5Model""",
        """SpeechT5PreTrainedModel""",
        """SpeechT5HifiGan""",
    ]
if TYPE_CHECKING:
    from .configuration_speecht5 import (
        SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
        SpeechT5Config,
        SpeechT5HifiGanConfig,
    )
    from .feature_extraction_speecht5 import SpeechT5FeatureExtractor
    from .processing_speecht5 import SpeechT5Processor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_speecht5 import SpeechT5Tokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_speecht5 import (
            SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            SpeechT5ForSpeechToSpeech,
            SpeechT5ForSpeechToText,
            SpeechT5ForTextToSpeech,
            SpeechT5HifiGan,
            SpeechT5Model,
            SpeechT5PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], UpperCamelCase__, module_spec=__spec__)
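    # Registering the _LazyModule in sys.modules means that, for example,
    # `from transformers.models.speecht5 import SpeechT5Config` only imports
    # the heavy submodule on first attribute access.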
| 65 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.25.0""")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 13 | 0 |
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
global_rng = random.Random()
def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor as a nested list."""
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)
    return values
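# For example, floats_list((2, 3)) returns two rows of three floats drawn from
# the shared module-level RNG, each scaled by `scale` (1.0 by default).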
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTester(unittest.TestCase):
    '''simple docstring'''

    def __init__(self, parent, batch_size=7, min_seq_length=400, max_seq_length=2000, feature_size=10, hop_length=160, chunk_length=8, padding_value=0.0, sampling_rate=4000, return_attention_mask=False, do_normalize=True):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
        self.feature_size = feature_size
        self.chunk_length = chunk_length
        self.hop_length = hop_length
    def prepare_feat_extract_dict(self):
'''simple docstring'''
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        '''simple docstring'''
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))
        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    '''simple docstring'''

    feature_extraction_class = WhisperFeatureExtractor if is_speech_available() else None
    def setUp(self):
        self.feat_extract_tester = WhisperFeatureExtractionTester(self)

    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)
        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = feat_extract_first.mel_filters
        mel_2 = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, '''feat_extract.json''')
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)
        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = feat_extract_first.mel_filters
        mel_2 = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)
    def test_call(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]
        # Test feature size
        input_features = feature_extractor(np_speech_inputs, padding='''max_length''', return_tensors='''np''').input_features
        self.assertTrue(input_features.ndim == 3)
        self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames)
        self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size)
        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors='''np''').input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors='''np''').input_features
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))
        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors='''np''').input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors='''np''').input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors='''np''').input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors='''np''').input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
        # Test truncation required
        speech_inputs = [floats_list((1, x))[0] for x in range(200, (feature_extractor.n_samples + 500), 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]
        speech_inputs_truncated = [x[: feature_extractor.n_samples] for x in speech_inputs]
        np_speech_inputs_truncated = [np.asarray(speech_input) for speech_input in speech_inputs_truncated]
        encoded_sequences_1 = feature_extractor(np_speech_inputs, return_tensors='''np''').input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs_truncated, return_tensors='''np''').input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
    def test_double_precision_pad(self):
        import torch
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100, 32).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()
        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{'''input_features''': inputs}], return_tensors='''np''')
            self.assertTrue(np_processed.input_features.dtype == np.float32)
            pt_processed = feature_extractor.pad([{'''input_features''': inputs}], return_tensors='''pt''')
            self.assertTrue(pt_processed.input_features.dtype == torch.float32)

    def _load_datasamples(self, num_samples):
        ds = load_dataset('''hf-internal-testing/librispeech_asr_dummy''', '''clean''', split='''validation''')
        # automatic decoding with librispeech
        speech_samples = ds.sort('''id''').select(range(num_samples))[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]

    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_FEATURES = torch.tensor(
            [
                0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
                0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
                0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
                -0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
            ]
        )
        # fmt: on
        input_speech = self._load_datasamples(1)
        feature_extractor = WhisperFeatureExtractor()
        input_features = feature_extractor(input_speech, return_tensors='''pt''').input_features
        self.assertEqual(input_features.shape, (1, 80, 3000))
        self.assertTrue(torch.allclose(input_features[0, 0, :30], EXPECTED_INPUT_FEATURES, atol=1e-4))
    def test_zero_mean_unit_variance_normalization(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        audio = self._load_datasamples(1)[0]
        audio = ((audio - audio.min()) / (audio.max() - audio.min())) * 65535  # Rescale to [0, 65535] to show issue
        audio = feat_extract.zero_mean_unit_var_norm([audio], attention_mask=None)[0]
        self.assertTrue(np.all(np.mean(audio) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(audio) - 1) < 1e-3))
| 226 |
class Node:
    """simple docstring"""

    def __init__(self, name, val):
        self.name = name
        self.val = val

    def __str__(self):
        return F"{self.__class__.__name__}({self.name}, {self.val})"

    def __lt__(self, other):
        return self.val < other.val
class MinHeap:
    """simple docstring"""

    def __init__(self, array):
        self.idx_of_element = {}
        self.heap_dict = {}
        self.heap = self.build_heap(array)

    def __getitem__(self, key):
        return self.get_value(key)

    def get_parent_idx(self, idx):
        return (idx - 1) // 2

    def get_left_child_idx(self, idx):
        return idx * 2 + 1

    def get_right_child_idx(self, idx):
        return idx * 2 + 2

    def get_value(self, key):
        return self.heap_dict[key]

    def build_heap(self, array):
        last_idx = len(array) - 1
        start_from = self.get_parent_idx(last_idx)
        for idx, i in enumerate(array):
            self.idx_of_element[i] = idx
            self.heap_dict[i.name] = i.val
        for i in range(start_from, -1, -1):
            self.sift_down(i, array)
        return array
    # this is a min-heapify method
    def sift_down(self, idx, array):
        while True:
            l = self.get_left_child_idx(idx)  # noqa: E741
            r = self.get_right_child_idx(idx)
            smallest = idx
            if l < len(array) and array[l] < array[idx]:
                smallest = l
            if r < len(array) and array[r] < array[smallest]:
                smallest = r
            if smallest != idx:
                array[idx], array[smallest] = array[smallest], array[idx]
                (
                    self.idx_of_element[array[idx]],
                    self.idx_of_element[array[smallest]],
                ) = (
                    self.idx_of_element[array[smallest]],
                    self.idx_of_element[array[idx]],
                )
                idx = smallest
            else:
                break

    def sift_up(self, idx):
        p = self.get_parent_idx(idx)
        while p >= 0 and self.heap[p] > self.heap[idx]:
            self.heap[p], self.heap[idx] = self.heap[idx], self.heap[p]
            self.idx_of_element[self.heap[p]], self.idx_of_element[self.heap[idx]] = (
                self.idx_of_element[self.heap[idx]],
                self.idx_of_element[self.heap[p]],
            )
            idx = p
            p = self.get_parent_idx(idx)
    def peek(self):
        return self.heap[0]

    def remove(self):
        self.heap[0], self.heap[-1] = self.heap[-1], self.heap[0]
        self.idx_of_element[self.heap[0]], self.idx_of_element[self.heap[-1]] = (
            self.idx_of_element[self.heap[-1]],
            self.idx_of_element[self.heap[0]],
        )
        x = self.heap.pop()
        del self.idx_of_element[x]
        self.sift_down(0, self.heap)
        return x

    def insert(self, node):
        self.heap.append(node)
        self.idx_of_element[node] = len(self.heap) - 1
        self.heap_dict[node.name] = node.val
        self.sift_up(len(self.heap) - 1)

    def is_empty(self):
        return len(self.heap) == 0

    def decrease_key(self, node, new_value):
        assert (
            self.heap[self.idx_of_element[node]].val > new_value
        ), "newValue must be less than current value"
        node.val = new_value
        self.heap_dict[node.name] = new_value
        self.sift_up(self.idx_of_element[node])
r = Node("""R""", -1)
b = Node("""B""", 6)
a = Node("""A""", 3)
x = Node("""X""", 1)
e = Node("""E""", 4)
# Use one of these two ways to generate Min-Heap
# Generating Min-Heap from array
my_min_heap = MinHeap([r, b, a, x, e])
# Generating Min-Heap by Insert method
# myMinHeap.insert(a)
# myMinHeap.insert(b)
# myMinHeap.insert(x)
# myMinHeap.insert(r)
# myMinHeap.insert(e)
# Before
print("""Min Heap - before decrease key""")
for i in my_min_heap.heap:
print(i)
print("""Min Heap - After decrease key of node [B -> -17]""")
my_min_heap.decrease_key(b, -17)
# After
for i in my_min_heap.heap:
print(i)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 13 | 0 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import tensorflow as tf
    from transformers import AutoTokenizer, TFAutoModelForSeq2SeqLM
@require_tf
@require_sentencepiece
@require_tokenizers
class TFMT5ModelIntegrationTest(unittest.TestCase):
    """simple docstring"""

    @slow
    def test_small_integration_test(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained('google/mt5-small')
        tokenizer = AutoTokenizer.from_pretrained('google/mt5-small')
        input_ids = tokenizer('Hello there', return_tensors='tf').input_ids
        labels = tokenizer('Hi I am', return_tensors='tf').input_ids
        loss = model(input_ids, labels=labels).loss
        mtf_score = -tf.math.reduce_mean(loss).numpy()
        EXPECTED_SCORE = -21.228168
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 2e-4)
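    # The reference value was presumably exported from the original Mesh
    # TensorFlow (mtf) implementation of mT5, hence the `mtf_score` name.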
| 277 |
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
    os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = "0.12"  # assumed parallelism: 8
if is_torch_available():
import torch
def ids_tensor(shape, vocab_size, rng=None):
    """Creates a random int32 tensor of the given shape within the vocab size."""
    if rng is None:
        rng = random.Random()
    total_dims = 1
    for dim in shape:
        total_dims *= dim
    values = []
    for _ in range(total_dims):
        values.append(rng.randint(0, vocab_size - 1))
    return np.array(values, dtype=jnp.int32).reshape(shape)


def random_attention_mask(shape, rng=None):
    attn_mask = ids_tensor(shape, vocab_size=2, rng=rng)
    # make sure that at least one token is attended to for each batch
    attn_mask[:, -1] = 1
    return attn_mask
@require_flax
class FlaxGenerationTesterMixin:
    """simple docstring"""

    model_tester = None
    all_generative_model_classes = ()
    def _get_input_ids_and_config(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()
        # cut to half length & take max batch_size 3
        max_batch_size = 2
        sequence_length = inputs["input_ids"].shape[-1] // 2
        input_ids = inputs["input_ids"][:max_batch_size, :sequence_length]
        attention_mask = jnp.ones_like(input_ids)
        attention_mask = attention_mask[:max_batch_size, :sequence_length]
        # generate max 5 tokens
        max_length = input_ids.shape[-1] + 5
        if config.eos_token_id is not None and config.pad_token_id is None:
            # hack to allow generate for models such as GPT2 as is done in `generate()`
            config.pad_token_id = config.eos_token_id
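            # e.g. GPT-2 defines an eos_token_id but no pad token; reusing the
            # EOS id for padding mirrors what `generate()` does internally.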
return config, input_ids, attention_mask, max_length
@is_pt_flax_cross_test
    def test_greedy_generate_pt_flax(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.decoder_start_token_id = 0
        for model_class in self.all_generative_model_classes:
            flax_model = model_class(config)
            pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
            pt_model_class = getattr(transformers, pt_model_class_name)
            pt_model = pt_model_class(config).eval()
            pt_model = load_flax_weights_in_pytorch_model(pt_model, flax_model.params)
            flax_generation_outputs = flax_model.generate(input_ids).sequences
            pt_generation_outputs = pt_model.generate(torch.tensor(input_ids, dtype=torch.long))
            if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
                flax_generation_outputs = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]
            self.assertListEqual(pt_generation_outputs.numpy().tolist(), flax_generation_outputs.tolist())
    def test_greedy_generate(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        for model_class in self.all_generative_model_classes:
            model = model_class(config)
            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)
            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_sample_generate(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.do_sample = True
        config.max_length = max_length
        for model_class in self.all_generative_model_classes:
            model = model_class(config)
            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)
            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
    def test_beam_search_generate(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.num_beams = 2
        for model_class in self.all_generative_model_classes:
            model = model_class(config)
            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)
            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_beam_search_generate_num_return_sequences(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.num_beams = 2
        config.num_return_sequences = 2
        for model_class in self.all_generative_model_classes:
            model = model_class(config)
            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[0], input_ids.shape[0] * config.num_return_sequences)
    def test_sample_generate_logits_warper(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.do_sample = True
        config.max_length = max_length
        config.temperature = 0.8
        config.top_k = 10
        config.top_p = 0.3
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9
        for model_class in self.all_generative_model_classes:
            model = model_class(config)
            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)
            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_greedy_generate_logits_warper(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.max_length = max_length
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9
        for model_class in self.all_generative_model_classes:
            model = model_class(config)
            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)
            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_beam_search_generate_logits_warper(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.max_length = max_length
        config.num_beams = 2
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9
        for model_class in self.all_generative_model_classes:
            model = model_class(config)
            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)
            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
    def test_greedy_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)
        config.do_sample = False
        config.max_length = max_length
        for model_class in self.all_generative_model_classes:
            model = model_class(config)
            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)
            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_sample_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)
        config.do_sample = True
        config.max_length = max_length
        for model_class in self.all_generative_model_classes:
            model = model_class(config)
            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)
            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_beam_search_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)
        config.num_beams = 2
        config.max_length = max_length
        for model_class in self.all_generative_model_classes:
            model = model_class(config)
            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)
            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
@require_flax
class FlaxGenerationIntegrationTests(unittest.TestCase):
    """simple docstring"""

    def test_validate_generation_inputs(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-bert")
        model = FlaxAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-bert-flax-only")
        encoder_input_str = "Hello world"
        input_ids = tokenizer(encoder_input_str, return_tensors="np").input_ids
        # typos are quickly detected (the correct argument is `do_sample`)
        with self.assertRaisesRegex(ValueError, "do_samples"):
            model.generate(input_ids, do_samples=True)
        # arbitrary arguments that will not be used anywhere are also not accepted
        with self.assertRaisesRegex(ValueError, "foo"):
            fake_model_kwargs = {"foo": "bar"}
            model.generate(input_ids, **fake_model_kwargs)
| 13 | 0 |
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
    from .tokenization_t5 import T5Tokenizer
else:
    T5Tokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""t5-small""": """https://huggingface.co/t5-small/resolve/main/spiece.model""",
"""t5-base""": """https://huggingface.co/t5-base/resolve/main/spiece.model""",
"""t5-large""": """https://huggingface.co/t5-large/resolve/main/spiece.model""",
"""t5-3b""": """https://huggingface.co/t5-3b/resolve/main/spiece.model""",
"""t5-11b""": """https://huggingface.co/t5-11b/resolve/main/spiece.model""",
},
"""tokenizer_file""": {
"""t5-small""": """https://huggingface.co/t5-small/resolve/main/tokenizer.json""",
"""t5-base""": """https://huggingface.co/t5-base/resolve/main/tokenizer.json""",
"""t5-large""": """https://huggingface.co/t5-large/resolve/main/tokenizer.json""",
"""t5-3b""": """https://huggingface.co/t5-3b/resolve/main/tokenizer.json""",
"""t5-11b""": """https://huggingface.co/t5-11b/resolve/main/tokenizer.json""",
},
}
# TODO(PVP) - this should be removed in Transformers v5
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""t5-small""": 512,
"""t5-base""": 512,
"""t5-large""": 512,
"""t5-3b""": 512,
"""t5-11b""": 512,
}
class T5TokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["""input_ids""", """attention_mask"""]
    slow_tokenizer_class = T5Tokenizer
    prefix_tokens: List[int] = []

    def __init__(self, vocab_file=None, tokenizer_file=None, eos_token="</s>", unk_token="<unk>", pad_token="<pad>", extra_ids=100, additional_special_tokens=None, **kwargs):
        '''simple docstring'''
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra special tokens
            extra_tokens = len(set(filter(lambda x: bool("""extra_id_""" in str(x)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    """ provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"""
                    """ tokens""")
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, eos_token=eos_token, unk_token=unk_token, pad_token=pad_token, extra_ids=extra_ids, additional_special_tokens=additional_special_tokens, **kwargs, )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
        self._extra_ids = extra_ids
    @staticmethod
    def _eventually_correct_t5_max_length(pretrained_model_name_or_path, max_model_length, init_max_model_length):
        '''simple docstring'''
        if pretrained_model_name_or_path in T5TokenizerFast.max_model_input_sizes:
            deprecated_max_model_length = T5TokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
            if init_max_model_length is not None and init_max_model_length != max_model_length:
                return init_max_model_length
            elif init_max_model_length is None:
                warnings.warn(
                    """This tokenizer was incorrectly instantiated with a model max length of"""
                    f" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"
                    """ behavior is kept to avoid breaking backwards compatibility when padding/encoding with"""
                    """ `truncation is True`.\n- Be aware that you SHOULD NOT rely on"""
                    f" {pretrained_model_name_or_path} automatically truncating your input to"
                    f" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"
                    f" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"
                    """ `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"""
                    """ instantiate this tokenizer with `model_max_length` set to your preferred value.""",
                    FutureWarning,
                )
        return max_model_length

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        '''simple docstring'''
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                """Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
                """tokenizer.""")
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
            logger.info(f"Copy vocab file to {out_vocab_file}")
        return (out_vocab_file,)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        '''simple docstring'''
        token_ids_0 = token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0
        else:
            token_ids_1 = token_ids_1 + [self.eos_token_id]
            return self.prefix_tokens + token_ids_0 + token_ids_1

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        '''simple docstring'''
        eos = [self.eos_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]
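    # Note: T5 does not use token type ids; both segments map to all zeros, so
    # the method above exists only to satisfy the shared tokenizer interface.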
    def get_sentinel_tokens(self):
        '''simple docstring'''
        return list(
            set(filter(lambda x: bool(re.search(r"""<extra_id_\d+>""", x)), self.additional_special_tokens)))

    def get_sentinel_token_ids(self):
        '''simple docstring'''
        return [self.convert_tokens_to_ids(token) for token in self.get_sentinel_tokens()]
| 20 |
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
KEYS_TO_MODIFY_MAPPING = {
"""text_branch""": """text_model""",
"""audio_branch""": """audio_model.audio_encoder""",
"""attn""": """attention.self""",
"""self.proj""": """output.dense""",
"""attention.self_mask""": """attn_mask""",
"""mlp.fc1""": """intermediate.dense""",
"""mlp.fc2""": """output.dense""",
"""norm1""": """layernorm_before""",
"""norm2""": """layernorm_after""",
"""bn0""": """batch_norm""",
}
processor = AutoFeatureExtractor.from_pretrained("""laion/clap-htsat-unfused""", truncation="""rand_trunc""")
def init_clap(checkpoint_path, enable_fusion=False):
    model, model_cfg = create_model(
        "HTSAT-tiny", "roberta", checkpoint_path, precision="fp32", device="cuda:0" if torch.cuda.is_available() else "cpu", enable_fusion=enable_fusion, fusion_type="aff_2d" if enable_fusion else None, )
    return model, model_cfg


def rename_state_dict(state_dict):
    model_state_dict = {}
    sequential_layers_pattern = r".*sequential.(\d+).*"
    text_projection_pattern = r".*_projection.(\d+).*"
    for key, value in state_dict.items():
        # check if any key needs to be modified
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)
        if re.match(sequential_layers_pattern, key):
            # replace sequential layers with list
            sequential_layer = re.match(sequential_layers_pattern, key).group(1)
            key = key.replace(f"sequential.{sequential_layer}.", f"layers.{int(sequential_layer)//3}.linear.")
        elif re.match(text_projection_pattern, key):
            projecton_layer = int(re.match(text_projection_pattern, key).group(1))
            # Because in CLAP they use `nn.Sequential`...
            transformers_projection_layer = 1 if projecton_layer == 0 else 2
            key = key.replace(f"_projection.{projecton_layer}.", f"_projection.linear{transformers_projection_layer}.")
        if "audio" in key and "qkv" in key:
            # split qkv into query key and value
            mixed_qkv = value
            qkv_dim = mixed_qkv.size(0) // 3
            query_layer = mixed_qkv[:qkv_dim]
            key_layer = mixed_qkv[qkv_dim : qkv_dim * 2]
            value_layer = mixed_qkv[qkv_dim * 2 :]
            model_state_dict[key.replace("qkv", "query")] = query_layer
            model_state_dict[key.replace("qkv", "key")] = key_layer
            model_state_dict[key.replace("qkv", "value")] = value_layer
        else:
            model_state_dict[key] = value
    return model_state_dict
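# For a fused attention weight of shape (3*d, d), the split above yields three
# (d, d) tensors, e.g. a (2304, 768) qkv weight becomes three (768, 768) ones.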
def convert_clap_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path, enable_fusion=False):
    clap_model, clap_model_cfg = init_clap(checkpoint_path, enable_fusion=enable_fusion)
    clap_model.eval()
    state_dict = clap_model.state_dict()
    state_dict = rename_state_dict(state_dict)
    transformers_config = ClapConfig()
    transformers_config.audio_config.enable_fusion = enable_fusion
    model = ClapModel(transformers_config)
    # ignore the spectrogram embedding layer
    model.load_state_dict(state_dict, strict=False)
    model.save_pretrained(pytorch_dump_folder_path)
    transformers_config.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
lowerCAmelCase : Tuple = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument("""--enable_fusion""", action="""store_true""", help="""Whether to enable fusion or not""")
lowerCAmelCase : int = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
| 13 | 0 |
import math
def proth(number: int) -> int:
    """Return the `number`-th Proth number (numbers of the form k * 2^n + 1)."""
    if not isinstance(number, int):
        msg = f'''Input value of [number={number}] must be an integer'''
        raise TypeError(msg)
    if number < 1:
        msg = f'''Input value of [number={number}] must be > 0'''
        raise ValueError(msg)
    elif number == 1:
        return 3
    elif number == 2:
        return 5
    else:
        block_index = int(math.log(number // 3, 2)) + 2
        proth_list = [3, 5]
        proth_index = 2
        increment = 3
        for block in range(1, block_index):
            for _ in range(increment):
                proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1])
                proth_index += 1
            increment *= 2
        return proth_list[number - 1]
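

# A brute-force cross-check against the definition (illustrative sketch, not
# part of the original module): a Proth number has the form k * 2^m + 1 with
# k odd and k < 2^m.
def _proth_bruteforce(count: int) -> list:
    found = []
    candidate = 2
    while len(found) < count:
        candidate += 1
        k, m = candidate - 1, 0
        while k % 2 == 0:  # factor candidate - 1 as k * 2^m with k odd
            k //= 2
            m += 1
        if m > 0 and k < 2**m:
            found.append(candidate)  # e.g. the first five are 3, 5, 9, 13, 17
    return found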
if __name__ == "__main__":
import doctest
doctest.testmod()
for number in range(11):
        value = 0
try:
            value = proth(number)
except ValueError:
print(f'ValueError: there is no {number}th Proth number')
continue
print(f'The {number}th Proth number: {value}')
| 348 |
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTModelTester:
    """simple docstring"""

    def __init__(self, parent, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, scope=None, encoder_stride=2):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride
        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
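        # e.g. with the defaults image_size=30 and patch_size=2 this gives
        # (30 // 2) ** 2 = 225 patches, so seq_length = 226 with [CLS].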
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ViTConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride, )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = ViTForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size))
        # test greyscale images
        config.num_channels = 1
        model = ViTForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = ViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
        # test greyscale images
        config.num_channels = 1
        model = ViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""

    all_model_classes = (
        (
            ViTModel,
            ViTForImageClassification,
            ViTForMaskedImageModeling,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ViTModel, "image-classification": ViTForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = ViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ViTModelIntegrationTest(unittest.TestCase):
    """simple docstring"""

    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = ViTForImageClassification.from_pretrained("google/vit-base-patch16-224").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.2744, 0.8215, -0.0836]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
@slow
    def test_inference_interpolate_pos_encoding(self):
        # ViT models have an `interpolate_pos_encoding` argument in their forward method,
        # allowing to interpolate the pre-trained position embeddings in order to use
        # the model on higher resolutions. The DINO model by Facebook AI leverages this
        # to visualize self-attention on higher resolution images.
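        # Concretely, dino-vits8 is pre-trained at 224px with 8px patches (a
        # 28x28 grid); at 480px the embeddings are resampled to a 60x60 grid,
        # giving 60 * 60 + 1 = 3601 positions, matching the shape checked below.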
        model = ViTModel.from_pretrained("facebook/dino-vits8").to(torch_device)
        image_processor = ViTImageProcessor.from_pretrained("facebook/dino-vits8", size=480)
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = inputs.pixel_values.to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(pixel_values, interpolate_pos_encoding=True)
        # verify the logits
        expected_shape = torch.Size((1, 3601, 384))
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
        expected_slice = torch.tensor(
            [[4.2340, 4.3906, -6.6692], [4.5463, 1.8928, -6.7257], [4.4429, 0.8496, -5.8585]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
@slow
@require_accelerate
@require_torch_gpu
    def test_inference_fp16(self):
        # A small test to make sure that inference works in half precision without any problem.
        model = ViTModel.from_pretrained("facebook/dino-vits8", torch_dtype=torch.float16, device_map="auto")
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = inputs.pixel_values.to(torch_device)
        # forward pass to make sure inference works in fp16
        with torch.no_grad():
            outputs = model(pixel_values)
| 13 | 0 |
"""simple docstring"""
from __future__ import annotations
from sys import maxsize
from typing import Generic, TypeVar
T = TypeVar("T")
def get_parent_position(position: int) -> int:
    return (position - 1) // 2


def get_child_left_position(position: int) -> int:
    return (2 * position) + 1


def get_child_right_position(position: int) -> int:
    return (2 * position) + 2
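# e.g. position 4 stores a node whose parent sits at (4 - 1) // 2 = 1 and
# whose children sit at 2 * 4 + 1 = 9 and 2 * 4 + 2 = 10 in the flat array.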
class MinPriorityQueue(Generic[T]):
    def __init__(self) -> None:
        self.heap: list[tuple[T, int]] = []
        self.position_map: dict[T, int] = {}
        self.elements: int = 0

    def __len__(self) -> int:
        return self.elements

    def __repr__(self) -> str:
        return str(self.heap)

    def is_empty(self) -> bool:
        return self.elements == 0

    def push(self, elem: T, weight: int) -> None:
        self.heap.append((elem, weight))
        self.position_map[elem] = self.elements
        self.elements += 1
        self._bubble_up(elem)

    def extract_min(self) -> T:
        if self.elements > 1:
            self._swap_nodes(0, self.elements - 1)
        elem, _ = self.heap.pop()
        del self.position_map[elem]
        self.elements -= 1
        if self.elements > 0:
            bubble_down_elem, _ = self.heap[0]
            self._bubble_down(bubble_down_elem)
        return elem

    def update_key(self, elem: T, weight: int) -> None:
        position = self.position_map[elem]
        self.heap[position] = (elem, weight)
        if position > 0:
            parent_position = get_parent_position(position)
            _, parent_weight = self.heap[parent_position]
            if parent_weight > weight:
                self._bubble_up(elem)
            else:
                self._bubble_down(elem)
        else:
            self._bubble_down(elem)
def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__ ) -> Tuple:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :Optional[int] = self.position_map[elem]
if curr_pos == 0:
return None
__SCREAMING_SNAKE_CASE :Dict = get_parent_position(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE :int = self.heap[curr_pos]
__SCREAMING_SNAKE_CASE :List[Any] = self.heap[parent_position]
if parent_weight > weight:
self._swap_nodes(lowerCAmelCase__ ,lowerCAmelCase__ )
return self._bubble_up(lowerCAmelCase__ )
return None
    def _bubble_down(self, elem: T) -> None:
        # Place a node at the proper position (downward movement); internal use only
        curr_pos = self.position_map[elem]
        _, weight = self.heap[curr_pos]
        child_left_position = get_child_left_position(curr_pos)
        child_right_position = get_child_right_position(curr_pos)
        if child_left_position < self.elements and child_right_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < child_left_weight and child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        if child_left_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            if child_left_weight < weight:
                self._swap_nodes(child_left_position, curr_pos)
                return self._bubble_down(elem)
            else:
                return None
        if child_right_position < self.elements:
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        return None

    def _swap_nodes(self, node1_pos: int, node2_pos: int) -> None:
        # Swap the nodes at the given positions and keep the position map in sync
        node1_elem = self.heap[node1_pos][0]
        node2_elem = self.heap[node2_pos][0]
        self.heap[node1_pos], self.heap[node2_pos] = (
            self.heap[node2_pos],
            self.heap[node1_pos],
        )
        self.position_map[node1_elem] = node2_pos
        self.position_map[node2_elem] = node1_pos
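    # Hedged usage sketch (illustrative only, not part of the original module):
    #     >>> pq = MinPriorityQueue[str]()
    #     >>> pq.push("a", 3)
    #     >>> pq.push("b", 1)
    #     >>> pq.extract_min()
    #     'b'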
class GraphUndirectedWeighted(Generic[T]):
    """Undirected weighted graph stored as an adjacency map."""

    def __init__(self) -> None:
        self.connections: dict[T, dict[T, int]] = {}
        self.nodes: int = 0

    def __repr__(self) -> str:
        return str(self.connections)

    def __len__(self) -> int:
        return self.nodes

    def add_node(self, node: T) -> None:
        # Add a node in the graph if it is not in the graph
        if node not in self.connections:
            self.connections[node] = {}
            self.nodes += 1

    def add_edge(self, node1: T, node2: T, weight: int) -> None:
        # Add an edge between 2 nodes in the graph
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight
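    # Hedged usage sketch (illustrative only, not part of the original module):
    #     >>> g = GraphUndirectedWeighted[str]()
    #     >>> g.add_edge("a", "b", 3)
    #     >>> g.add_edge("b", "c", 10)
    #     >>> len(g)
    #     3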
def prims_algo(
    graph: GraphUndirectedWeighted[T],
) -> tuple[dict[T, int], dict[T, T | None]]:
    dist: dict[T, int] = {node: maxsize for node in graph.connections}
    parent: dict[T, T | None] = {node: None for node in graph.connections}

    priority_queue: MinPriorityQueue[T] = MinPriorityQueue()
    for node, weight in dist.items():
        priority_queue.push(node, weight)

    if priority_queue.is_empty():
        return dist, parent

    # initialization
    node = priority_queue.extract_min()
    dist[node] = 0
    for neighbour in graph.connections[node]:
        if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
            dist[neighbour] = dist[node] + graph.connections[node][neighbour]
            priority_queue.update_key(neighbour, dist[neighbour])
            parent[neighbour] = node

    # running prim's algorithm
    while not priority_queue.is_empty():
        node = priority_queue.extract_min()
        for neighbour in graph.connections[node]:
            if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
                dist[neighbour] = dist[node] + graph.connections[node][neighbour]
                priority_queue.update_key(neighbour, dist[neighbour])
                parent[neighbour] = node
    return dist, parent
| 191 |
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt"""}

PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""allenai/longformer-base-4096""": """https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json""",
"""allenai/longformer-large-4096""": (
"""https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json"""
),
"""allenai/longformer-large-4096-finetuned-triviaqa""": (
"""https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json"""
),
"""allenai/longformer-base-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json"""
),
"""allenai/longformer-large-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json"""
),
},
"""merges_file""": {
"""allenai/longformer-base-4096""": """https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt""",
"""allenai/longformer-large-4096""": (
"""https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt"""
),
"""allenai/longformer-large-4096-finetuned-triviaqa""": (
"""https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt"""
),
"""allenai/longformer-base-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt"""
),
"""allenai/longformer-large-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""allenai/longformer-base-4096""": 4096,
"""allenai/longformer-large-4096""": 4096,
"""allenai/longformer-large-4096-finetuned-triviaqa""": 4096,
"""allenai/longformer-base-4096-extra.pos.embd.only""": 4096,
"""allenai/longformer-large-4096-extra.pos.embd.only""": 4096,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
def get_pairs(word):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
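# Hedged usage sketch (illustrative, not from the original module):
#     >>> b2u = bytes_to_unicode()
#     >>> len(b2u), len(set(b2u.values()))
#     (256, 256)
#     >>> sorted(get_pairs("hello"))
#     [('e', 'l'), ('h', 'e'), ('l', 'l'), ('l', 'o')]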
class LongformerTokenizer(PreTrainedTokenizer):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(self, vocab_file, merges_file, errors="replace", bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", add_prefix_space=False, **kwargs):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, **kwargs, )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")
    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens
    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        if not os.path.isdir(save_directory):
            logger.error(F"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"])

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        F"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!")
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
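    # Hedged usage sketch (file paths are placeholders, not from this module):
    #     >>> tok = LongformerTokenizer("vocab.json", "merges.txt")
    #     >>> tok.tokenize("Hello world")   # byte-level BPE pieces, e.g. ['Hello', 'Ġworld']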
| 13 | 0 |
def print_pascal_triangle(num_rows: int) -> None:
    """Print Pascal's triangle for different number of rows"""
    triangle = generate_pascal_triangle(num_rows)
    for row_idx in range(num_rows):
        # Print left spaces
        for _ in range(num_rows - row_idx - 1):
            print(end=" ")
        # Print row values
        for col_idx in range(row_idx + 1):
            if col_idx != row_idx:
                print(triangle[row_idx][col_idx], end=" ")
            else:
                print(triangle[row_idx][col_idx], end="")
        print()
def generate_pascal_triangle(num_rows: int) -> list[list[int]]:
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0")
    triangle: list[list[int]] = []
    for current_row_idx in range(num_rows):
        current_row = populate_current_row(triangle, current_row_idx)
        triangle.append(current_row)
    return triangle
def populate_current_row(triangle: list[list[int]], current_row_idx: int) -> list[int]:
    current_row = [-1] * (current_row_idx + 1)
    # first and last elements of current row are equal to 1
    current_row[0], current_row[-1] = 1, 1
    for current_col_idx in range(1, current_row_idx):
        calculate_current_element(
            triangle, current_row, current_row_idx, current_col_idx)
    return current_row


def calculate_current_element(
    triangle: list[list[int]],
    current_row: list[int],
    current_row_idx: int,
    current_col_idx: int,
) -> None:
    above_to_left_elt = triangle[current_row_idx - 1][current_col_idx - 1]
    above_to_right_elt = triangle[current_row_idx - 1][current_col_idx]
    current_row[current_col_idx] = above_to_left_elt + above_to_right_elt
def generate_pascal_triangle_optimized(num_rows: int) -> list[list[int]]:
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0")
    result: list[list[int]] = [[1]]

    for row_index in range(1, num_rows):
        temp_row = [0] + result[-1] + [0]
        row_length = row_index + 1
        # Calculate the number of distinct elements in a row
        distinct_elements = sum(divmod(row_length, 2))
        row_first_half = [
            temp_row[i - 1] + temp_row[i] for i in range(1, distinct_elements + 1)
        ]
        row_second_half = row_first_half[: (row_index + 1) // 2]
        row_second_half.reverse()
        row = row_first_half + row_second_half
        result.append(row)
    return result
def benchmark() -> None:
    """Benchmark the two triangle generators defined above."""
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f'''{func.__name__}({value})'''
        timing = timeit(f'''__main__.{call}''', setup="""import __main__""")
        # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
        print(f'''{call:38} -- {timing:.4f} seconds''')

    for value in range(15):  # (1, 7, 14):
        for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
            benchmark_a_function(func, value)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
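    # Hedged sanity checks (illustrative additions, not from the original file):
    assert generate_pascal_triangle(4) == [[1], [1, 1], [1, 2, 1], [1, 3, 3, 1]]
    assert generate_pascal_triangle_optimized(4) == generate_pascal_triangle(4)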
| 26 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class __lowercase ( unittest.TestCase ):
"""simple docstring"""
def _SCREAMING_SNAKE_CASE ( self : List[str]):
# For consistency across different places the DisjunctiveConstraint is called,
# dc.token_ids is a list of integers. It is also initialized only by integers.
SCREAMING_SNAKE_CASE_: Optional[Any] = [[1, 2, 4], [1, 2, 3, 4]]
SCREAMING_SNAKE_CASE_: Any = DisjunctiveConstraint(lowerCAmelCase__)
self.assertTrue(isinstance(dc.token_ids , lowerCAmelCase__))
with self.assertRaises(lowerCAmelCase__):
DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]]))
with self.assertRaises(lowerCAmelCase__):
DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])])
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
    # We can't have constraints that are complete subsets of another. This leads to a perverse
# interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint?
# It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially
# fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm
# will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it).
SCREAMING_SNAKE_CASE_: Union[str, Any] = [[1, 2], [1, 2, 3, 4]]
with self.assertRaises(lowerCAmelCase__):
DisjunctiveConstraint(lowerCAmelCase__) # fails here
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
SCREAMING_SNAKE_CASE_: List[str] = [[1, 2, 3], [1, 2, 4]]
SCREAMING_SNAKE_CASE_: Tuple = DisjunctiveConstraint(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: int = dc.update(1)
SCREAMING_SNAKE_CASE_: Dict = stepped is True and completed is False and reset is False
self.assertTrue(lowerCAmelCase__)
self.assertTrue(not dc.completed)
self.assertTrue(dc.current_seq == [1])
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: str = dc.update(2)
SCREAMING_SNAKE_CASE_: Optional[Any] = stepped is True and completed is False and reset is False
self.assertTrue(lowerCAmelCase__)
self.assertTrue(not dc.completed)
self.assertTrue(dc.current_seq == [1, 2])
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Tuple = dc.update(3)
SCREAMING_SNAKE_CASE_: Tuple = stepped is True and completed is True and reset is False
self.assertTrue(lowerCAmelCase__)
self.assertTrue(dc.completed) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 3])
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
SCREAMING_SNAKE_CASE_: Union[str, Any] = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
SCREAMING_SNAKE_CASE_: List[Any] = DisjunctiveConstraint(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[Any] = dc.update(1)
self.assertTrue(not dc.completed)
self.assertTrue(dc.current_seq == [1])
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Dict = dc.update(2)
self.assertTrue(not dc.completed)
self.assertTrue(dc.current_seq == [1, 2])
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Tuple = dc.update(4)
self.assertTrue(not dc.completed)
self.assertTrue(dc.current_seq == [1, 2, 4])
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[Any] = dc.update(5)
self.assertTrue(dc.completed) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 4, 5])
dc.reset()
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: List[Any] = dc.update(1)
self.assertTrue(not dc.completed)
self.assertTrue(dc.remaining() == 3)
self.assertTrue(dc.current_seq == [1])
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Tuple = dc.update(2)
self.assertTrue(not dc.completed)
self.assertTrue(dc.remaining() == 2)
self.assertTrue(dc.current_seq == [1, 2])
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Union[str, Any] = dc.update(5)
self.assertTrue(dc.completed) # Completed!
self.assertTrue(dc.remaining() == 0)
self.assertTrue(dc.current_seq == [1, 2, 5])
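    def test_shortest_branch_completes(self):
        # Hedged extra example (not in the original test file): the shortest branch
        # of a disjunction can complete the constraint on its own.
        dc = DisjunctiveConstraint([[1, 2], [1, 3]])
        dc.update(1)
        stepped, completed, reset = dc.update(3)
        self.assertTrue(stepped)
        self.assertTrue(completed)
        self.assertTrue(dc.current_seq == [1, 3])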
| 13 | 0 |
import argparse
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    '''simple docstring'''
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples, padding="longest", max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors="pt", )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size, drop_last=True)
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE, drop_last=(accelerator.mixed_precision == "fp8"), )

    return train_dataloader, eval_dataloader
def training_function(config, args):
    '''simple docstring'''
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps, )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions, references=references, )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"""epoch {epoch}:""", eval_metric)
def main():
    '''simple docstring'''
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision", type=str, default=None, choices=["no", "fp16", "bf16", "fp8"], help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.", )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2E-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
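# Hedged launch examples (commands assumed from the accelerate documentation; the
# script filename is a placeholder, not taken from this file):
#     accelerate launch nlp_example.py --mixed_precision fp16
#     python nlp_example.py --cpu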
| 283 |
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
@require_tokenizers
class XGLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """simple docstring"""

    tokenizer_class = XGLMTokenizer
    rust_tokenizer_class = XGLMTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
def _SCREAMING_SNAKE_CASE ( self : Tuple):
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = XGLMTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
SCREAMING_SNAKE_CASE_: Optional[Any] = "<pad>"
SCREAMING_SNAKE_CASE_: int = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase__) , lowerCAmelCase__)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase__) , lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : List[str]):
SCREAMING_SNAKE_CASE_: Optional[int] = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , "<s>")
self.assertEqual(vocab_keys[1] , "<pad>")
self.assertEqual(len(lowerCAmelCase__) , 1008)
def _SCREAMING_SNAKE_CASE ( self : Any):
self.assertEqual(self.get_tokenizer().vocab_size , 1008)
def _SCREAMING_SNAKE_CASE ( self : Tuple):
        tokenizer = XGLMTokenizer(SAMPLE_VOCAB, keep_accents=True)
SCREAMING_SNAKE_CASE_: Union[str, Any] = tokenizer.tokenize("This is a test")
self.assertListEqual(lowerCAmelCase__ , ["▁This", "▁is", "▁a", "▁t", "est"])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCAmelCase__) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
SCREAMING_SNAKE_CASE_: List[str] = tokenizer.tokenize("I was born in 92000, and this is falsé.")
self.assertListEqual(
lowerCAmelCase__ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
SCREAMING_SNAKE_CASE_: Optional[Any] = tokenizer.convert_tokens_to_ids(lowerCAmelCase__)
self.assertListEqual(
lowerCAmelCase__ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
SCREAMING_SNAKE_CASE_: List[Any] = tokenizer.convert_ids_to_tokens(lowerCAmelCase__)
self.assertListEqual(
lowerCAmelCase__ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
    @cached_property
    def big_tokenizer(self):
        return XGLMTokenizer.from_pretrained("facebook/xglm-564M")
def _SCREAMING_SNAKE_CASE ( self : str):
with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB, f.name)
            tokenizer = XGLMTokenizer(f.name, keep_accents=True)
            pickled_tokenizer = pickle.dumps(tokenizer)
        pickle.loads(pickled_tokenizer)
def _SCREAMING_SNAKE_CASE ( self : str):
if not self.test_rust_tokenizer:
return
SCREAMING_SNAKE_CASE_: Dict = self.get_tokenizer()
SCREAMING_SNAKE_CASE_: List[str] = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE_: Any = "I was born in 92000, and this is falsé."
SCREAMING_SNAKE_CASE_: Union[str, Any] = tokenizer.tokenize(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: int = rust_tokenizer.tokenize(lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[str] = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[Any] = rust_tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[int] = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE_: str = tokenizer.encode(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Union[str, Any] = rust_tokenizer.encode(lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__)
@slow
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
SCREAMING_SNAKE_CASE_: Dict = "Hello World!"
SCREAMING_SNAKE_CASE_: Union[str, Any] = [2, 3_1227, 4447, 35]
self.assertListEqual(lowerCAmelCase__ , self.big_tokenizer.encode(lowerCAmelCase__))
@slow
def _SCREAMING_SNAKE_CASE ( self : int):
SCREAMING_SNAKE_CASE_: Union[str, Any] = (
"This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
" add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth"
)
# fmt: off
SCREAMING_SNAKE_CASE_: Optional[Any] = [2, 1018, 67, 11, 1988, 2617, 5631, 278, 11, 3407, 48, 7_1630, 2_8085, 4, 3234, 157, 13, 6, 5, 6, 4, 3526, 768, 15, 659, 57, 298, 3983, 864, 129, 21, 6, 5, 1_3675, 377, 652, 7580, 1_0341, 155, 2817, 422, 1666, 7, 1674, 53, 113, 20_2277, 1_7892, 33, 60, 87, 4, 3234, 157, 61, 2667, 5_2376, 19, 88, 23, 735]
# fmt: on
self.assertListEqual(lowerCAmelCase__ , self.big_tokenizer.encode(lowerCAmelCase__))
@slow
def _SCREAMING_SNAKE_CASE ( self : int):
# fmt: off
SCREAMING_SNAKE_CASE_: str = {
"input_ids": [[2, 10_8825, 1163, 15, 8_8010, 473, 1_5898, 157, 1_3672, 1857, 312, 8, 23_8021, 1163, 53, 1_3672, 1857, 312, 8, 5_3283, 18_2396, 8, 1_8566, 16, 3_6733, 4101, 8, 230, 24_4017, 12_2553, 7, 15, 13_2597, 4, 293, 1_2511, 7610, 4, 3414, 13_2597, 9, 4, 3_2361, 362, 4, 734, 2_8512, 3_2569, 18, 4, 3_2361, 2_6096, 1_4982, 73, 1_8715, 2_1433, 23_5261, 15, 492, 1_2427, 16, 53, 1_8715, 2_1433, 6_5454, 15, 2_3659, 563, 16, 278, 597, 2843, 595, 7931, 18_2396, 6_4186, 22, 886, 595, 13_2981, 53, 2_5540, 3449, 4_3982, 3_9901, 5951, 878, 330, 4, 2_7694, 8_0269, 312, 53, 6517, 1_1780, 611, 2_0408, 5], [2, 6, 13_2597, 67, 4_2897, 33, 592, 8, 16_3729, 2_5540, 361, 13_6997, 10_9514, 17_3230, 7, 501, 60, 10_2913, 196, 5631, 235, 6_3243, 473, 6, 23_1757, 74, 5277, 7905, 53, 3095, 3_7317, 22, 454, 18_3874, 5], [2, 268, 3_1298, 4_6530, 6, 13_2935, 4_3831, 7, 597, 32, 24, 3688, 9865, 5]],
"attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCAmelCase__ , model_name="facebook/xglm-564M" , padding=lowerCAmelCase__ , )
| 13 | 0 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class ImageSegmentationTool(PipelineTool):
    '''simple docstring'''

    description = (
        'This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image.'
        'It takes two arguments named `image` which should be the original image, and `label` which should be a text '
        'describing the elements what should be identified in the segmentation mask. The tool returns the mask.'
    )
    default_checkpoint = 'CIDAS/clipseg-rd64-refined'
    name = 'image_segmenter'
    model_class = CLIPSegForImageSegmentation

    inputs = ['image', 'text']
    outputs = ['image']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['vision'])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image", label: str):
        return self.pre_processor(text=[label], images=[image], padding=True, return_tensors='pt')

    def forward(self, inputs):
        with torch.no_grad():
            logits = self.model(**inputs).logits
        return logits

    def decode(self, outputs):
        array = outputs.cpu().detach().numpy()
        array[array < 0] = 0
        array[array > 0] = 1
        return Image.fromarray((array * 255).astype(np.uint8))
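# Hedged usage sketch (illustrative; in practice the tool is usually instantiated
# through the transformers agents/tools API rather than directly):
#     >>> tool = ImageSegmentationTool()
#     >>> mask = tool(image, "cat")  # PIL image in, binary PIL mask out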
| 166 |
def hubble_parameter(
    hubble_constant: float,
    radiation_density: float,
    matter_density: float,
    dark_energy: float,
    redshift: float,
) -> float:
    parameters = [redshift, radiation_density, matter_density, dark_energy]
    if any(p < 0 for p in parameters):
        raise ValueError("All input parameters must be positive")

    if any(p > 1 for p in parameters[1:4]):
        raise ValueError("Relative densities cannot be greater than one")
    else:
        curvature = 1 - (matter_density + radiation_density + dark_energy)

        e_2 = (
            radiation_density * (redshift + 1) ** 4
            + matter_density * (redshift + 1) ** 3
            + curvature * (redshift + 1) ** 2
            + dark_energy
        )

        hubble = hubble_constant * e_2 ** (1 / 2)
        return hubble
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# demo LCDM approximation
    matter_density = 0.3
print(
hubble_parameter(
hubble_constant=68.3,
radiation_density=1E-4,
matter_density=matter_density,
dark_energy=1 - matter_density,
redshift=0,
)
)
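    # Hedged sanity check (illustrative): at redshift zero the density terms plus
    # curvature sum to one, so the Hubble parameter reduces to the Hubble constant.
    assert abs(
        hubble_parameter(
            hubble_constant=68.3,
            radiation_density=1E-4,
            matter_density=matter_density,
            dark_energy=1 - matter_density,
            redshift=0,
        )
        - 68.3
    ) < 1E-9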
| 13 | 0 |
def solution() -> int:
    """
    Returns the number of Sundays that fell on the first of the month during the
    twentieth century (1 Jan 1901 to 31 Dec 2000).
    """
    days_per_month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    day = 6
    month = 1
    year = 1901

    sundays = 0

    while year < 2001:
        day += 7

        if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0):
            if day > days_per_month[month - 1] and month != 2:
                month += 1
                day = day - days_per_month[month - 2]
            elif day > 29 and month == 2:
                month += 1
                day = day - 29
        else:
            if day > days_per_month[month - 1]:
                month += 1
                day = day - days_per_month[month - 2]

        if month > 12:
            year += 1
            month = 1

        if year < 2001 and day == 1:
            sundays += 1
    return sundays
if __name__ == "__main__":
print(solution())
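    # Hedged check: 171 is the well-known Project Euler #19 answer for this range.
    assert solution() == 171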
| 274 |
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_va import MobileNetVaConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = """MobileNetV1Config"""

# Base docstring
_CHECKPOINT_FOR_DOC = """google/mobilenet_v1_1.0_224"""
_EXPECTED_OUTPUT_SHAPE = [1, 1024, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = """google/mobilenet_v1_1.0_224"""
_IMAGE_CLASS_EXPECTED_OUTPUT = """tabby, tabby cat"""

MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST = [
    """google/mobilenet_v1_1.0_224""",
    """google/mobilenet_v1_0.75_192""",
    # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def _build_tf_to_pytorch_map(model, config, tf_weights=None):
    """A map of modules from TF to PyTorch."""
    tf_to_pt_map = {}

    if isinstance(model, MobileNetVaForImageClassification):
        backbone = model.mobilenet_va
    else:
        backbone = model

    prefix = "MobilenetV1/Conv2d_0/"
    tf_to_pt_map[prefix + "weights"] = backbone.conv_stem.convolution.weight
    tf_to_pt_map[prefix + "BatchNorm/beta"] = backbone.conv_stem.normalization.bias
    tf_to_pt_map[prefix + "BatchNorm/gamma"] = backbone.conv_stem.normalization.weight
    tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = backbone.conv_stem.normalization.running_mean
    tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = backbone.conv_stem.normalization.running_var

    for i in range(13):
        tf_index = i + 1
        pt_index = i * 2

        pointer = backbone.layer[pt_index]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_depthwise/"
        tf_to_pt_map[prefix + "depthwise_weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var

        pointer = backbone.layer[pt_index + 1]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_pointwise/"
        tf_to_pt_map[prefix + "weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var

    if isinstance(model, MobileNetVaForImageClassification):
        prefix = "MobilenetV1/Logits/Conv2d_1c_1x1/"
        tf_to_pt_map[prefix + "weights"] = model.classifier.weight
        tf_to_pt_map[prefix + "biases"] = model.classifier.bias

    return tf_to_pt_map
def load_tf_weights_in_mobilenet_va(model, config, tf_checkpoint_path):
    """Load TensorFlow checkpoints in a PyTorch model."""
    try:
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            "Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions." )
        raise

    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_checkpoint_path)
    tf_weights = {}
    for name, shape in init_vars:
        logger.info(f"Loading TF weight {name} with shape {shape}" )
        array = tf.train.load_variable(tf_checkpoint_path, name)
        tf_weights[name] = array

    # Build TF to PyTorch weights loading map
    tf_to_pt_map = _build_tf_to_pytorch_map(model, config, tf_weights)

    for name, pointer in tf_to_pt_map.items():
        logger.info(f"Importing {name}" )
        if name not in tf_weights:
            logger.info(f"{name} not in tf pre-trained weights, skipping" )
            continue

        array = tf_weights[name]

        if "depthwise_weights" in name:
            logger.info("Transposing depthwise" )
            array = np.transpose(array, (2, 3, 0, 1) )
        elif "weights" in name:
            logger.info("Transposing" )
            if len(pointer.shape ) == 2:  # copying into linear layer
                array = array.squeeze().transpose()
            else:
                array = np.transpose(array, (3, 2, 0, 1) )

        if pointer.shape != array.shape:
            raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched" )

        logger.info(f"Initialize PyTorch weight {name} {array.shape}" )
        pointer.data = torch.from_numpy(array)

        tf_weights.pop(name, None)
        tf_weights.pop(name + "/RMSProp", None)
        tf_weights.pop(name + "/RMSProp_1", None)
        tf_weights.pop(name + "/ExponentialMovingAverage", None)

    logger.info(f"Weights not copied to PyTorch model: {', '.join(tf_weights.keys() )}" )
    return model
def apply_tf_padding(features: torch.Tensor, conv_layer: nn.Conv2d) -> torch.Tensor:
    """Apply TensorFlow-style "SAME" padding to a convolution layer."""
    in_height, in_width = features.shape[-2:]
    stride_height, stride_width = conv_layer.stride
    kernel_height, kernel_width = conv_layer.kernel_size

    if in_height % stride_height == 0:
        pad_along_height = max(kernel_height - stride_height, 0 )
    else:
        pad_along_height = max(kernel_height - (in_height % stride_height), 0 )

    if in_width % stride_width == 0:
        pad_along_width = max(kernel_width - stride_width, 0 )
    else:
        pad_along_width = max(kernel_width - (in_width % stride_width), 0 )

    pad_left = pad_along_width // 2
    pad_right = pad_along_width - pad_left
    pad_top = pad_along_height // 2
    pad_bottom = pad_along_height - pad_top

    padding = (pad_left, pad_right, pad_top, pad_bottom)
    return nn.functional.pad(features, padding, "constant", 0.0 )
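# Hedged worked example (illustrative): for a 7x7 input with stride 2 and kernel 3,
# 7 % 2 == 1, so pad_along_height = max(3 - 1, 0) = 2, split as (top, bottom) = (1, 1).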
class MobileNetVaConvLayer(nn.Module):
    """simple docstring"""

    def __init__(self, config: MobileNetVaConfig, in_channels: int, out_channels: int, kernel_size: int, stride: Optional[int] = 1, groups: Optional[int] = 1, bias: bool = False, use_normalization: Optional[bool] = True, use_activation: Optional[bool or str] = True, ) -> None:
        super().__init__()
        self.config = config

        if in_channels % groups != 0:
            raise ValueError(F"Input channels ({in_channels}) are not divisible by {groups} groups.")
        if out_channels % groups != 0:
            raise ValueError(F"Output channels ({out_channels}) are not divisible by {groups} groups.")

        padding = 0 if config.tf_padding else int((kernel_size - 1) / 2)

        self.convolution = nn.Conv2d(
            in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=padding, groups=groups, bias=bias, padding_mode="zeros", )

        if use_normalization:
            self.normalization = nn.BatchNorm2d(
                num_features=out_channels, eps=config.layer_norm_eps, momentum=0.9997, affine=True, track_running_stats=True, )
        else:
            self.normalization = None

        if use_activation:
            if isinstance(use_activation, str):
                self.activation = ACTaFN[use_activation]
            elif isinstance(config.hidden_act, str):
                self.activation = ACTaFN[config.hidden_act]
            else:
                self.activation = config.hidden_act
        else:
            self.activation = None

    def forward(self, features: torch.Tensor) -> torch.Tensor:
        if self.config.tf_padding:
            features = apply_tf_padding(features, self.convolution)
        features = self.convolution(features)
        if self.normalization is not None:
            features = self.normalization(features)
        if self.activation is not None:
            features = self.activation(features)
        return features
class MobileNetVaPreTrainedModel(PreTrainedModel):
    """simple docstring"""

    config_class = MobileNetVaConfig
    load_tf_weights = load_tf_weights_in_mobilenet_va
    base_model_prefix = '''mobilenet_v1'''
    main_input_name = '''pixel_values'''
    supports_gradient_checkpointing = False

    def _init_weights(self, module: Union[nn.Linear, nn.Conv2d]) -> None:
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.BatchNorm2d):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
MOBILENET_V1_START_DOCSTRING = R"""
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
MOBILENET_V1_INPUTS_DOCSTRING = R"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`MobileNetV1ImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    '''The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.''', MOBILENET_V1_START_DOCSTRING, )
class MobileNetVaModel(MobileNetVaPreTrainedModel):
"""simple docstring"""
    def __init__(self, config: MobileNetVaConfig, add_pooling_layer: bool = True):
        super().__init__(config)
        self.config = config

        depth = 32
        out_channels = max(int(depth * config.depth_multiplier), config.min_depth)

        self.conv_stem = MobileNetVaConvLayer(
            config, in_channels=config.num_channels, out_channels=out_channels, kernel_size=3, stride=2, )

        strides = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]

        self.layer = nn.ModuleList()
        for i in range(13):
            in_channels = out_channels

            if strides[i] == 2 or i == 0:
                depth *= 2
                out_channels = max(int(depth * config.depth_multiplier), config.min_depth)

            self.layer.append(
                MobileNetVaConvLayer(
                    config, in_channels=in_channels, out_channels=in_channels, kernel_size=3, stride=strides[i], groups=in_channels, ))
            self.layer.append(
                MobileNetVaConvLayer(
                    config, in_channels=in_channels, out_channels=out_channels, kernel_size=1, ))

        self.pooler = nn.AdaptiveAvgPool2d((1, 1)) if add_pooling_layer else None

        # Initialize weights and apply final processing
        self.post_init()
    def _prune_heads(self, heads_to_prune):
        raise NotImplementedError

    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithPoolingAndNoAttention, config_class=_CONFIG_FOR_DOC, modality="vision", expected_output=_EXPECTED_OUTPUT_SHAPE, )
    def forward(self, pixel_values: Optional[torch.Tensor] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        hidden_states = self.conv_stem(pixel_values)

        all_hidden_states = () if output_hidden_states else None

        for i, layer_module in enumerate(self.layer):
            hidden_states = layer_module(hidden_states)

            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

        last_hidden_state = hidden_states

        if self.pooler is not None:
            pooled_output = torch.flatten(self.pooler(last_hidden_state), start_dim=1)
        else:
            pooled_output = None

        if not return_dict:
            return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None)

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=all_hidden_states, )
@add_start_docstrings(
    '''
    MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    ''', MOBILENET_V1_START_DOCSTRING, )
class MobileNetVaForImageClassification(MobileNetVaPreTrainedModel):
"""simple docstring"""
    def __init__(self, config: MobileNetVaConfig) -> None:
        super().__init__(config)

        self.num_labels = config.num_labels
        self.mobilenet_va = MobileNetVaModel(config)

        last_hidden_size = self.mobilenet_va.layer[-1].convolution.out_channels

        # Classifier head
        self.dropout = nn.Dropout(config.classifier_dropout_prob, inplace=True)
        self.classifier = nn.Linear(last_hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity()

        # Initialize weights and apply final processing
        self.post_init()
    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        labels: Optional[torch.Tensor] = None,
        return_dict: Optional[bool] = None,
    ):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.mobilenet_va(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(self.dropout(pooled_output))

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return ImageClassifierOutputWithNoAttention(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
        )
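# --- Added usage sketch (not part of the original file) -------------------------
# A minimal inference example for the classification head above, written against
# the upstream `transformers` API that this file mirrors. The checkpoint name is
# the commonly published one and "cat.png" is a placeholder path.
#
#   import torch
#   from PIL import Image
#   from transformers import AutoImageProcessor, MobileNetV1ForImageClassification
#
#   image = Image.open("cat.png")
#   processor = AutoImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224")
#   model = MobileNetV1ForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224")
#   inputs = processor(images=image, return_tensors="pt")
#   with torch.no_grad():
#       logits = model(**inputs).logits  # shape (1, num_labels)
#   print(model.config.id2label[int(logits.argmax(-1))])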
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class TFCamembertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFCamembertModel.from_pretrained("jplu/tf-camembert-base")

        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]],
            dtype=tf.int32,
        )  # J'aime le camembert !

        output = model(input_ids)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 10, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]],
            dtype=tf.float32,
        )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()

        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
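# --- Added sketch (not in the original test) ------------------------------------
# Producing `input_ids` with a tokenizer instead of hard-coding token ids; the
# tokenizer checkpoint name is the commonly used one and may differ from the TF
# checkpoint loaded above.
#
#   from transformers import CamembertTokenizer
#   tokenizer = CamembertTokenizer.from_pretrained("camembert-base")
#   input_ids = tf.convert_to_tensor([tokenizer.encode("J'aime le camembert !")], dtype=tf.int32)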
def snake_to_camel_case(input_str: str, use_pascal: bool = False) -> str:
    """Convert a snake_case string to camelCase (or PascalCase if use_pascal is True)."""
    if not isinstance(input_str, str):
        msg = f"Expected string as input, found {type(input_str)}"
        raise ValueError(msg)
    if not isinstance(use_pascal, bool):
        msg = f"Expected boolean as use_pascal parameter, found {type(use_pascal)}"
        raise ValueError(msg)
    words = input_str.split("_")
    start_index = 0 if use_pascal else 1
    words_to_capitalize = words[start_index:]
    capitalized_words = [word[0].upper() + word[1:] for word in words_to_capitalize]
    initial_word = "" if use_pascal else words[0]
    return "".join([initial_word, *capitalized_words])
if __name__ == "__main__":
from doctest import testmod
testmod()
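# --- Added illustration (not in the original module) ----------------------------
def _demo_snake_to_camel_case() -> None:
    # Expected behaviour of the converter above on a sample input.
    assert snake_to_camel_case("some_random_string") == "someRandomString"
    assert snake_to_camel_case("some_random_string", use_pascal=True) == "SomeRandomString"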
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class MegatronBertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        embedding_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MegatronBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            embedding_size=self.embedding_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
    def create_and_check_megatron_bert_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_megatron_bert_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_megatron_bert_for_causal_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_megatron_bert_for_next_sequence_prediction(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_megatron_bert_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            next_sentence_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))
    def create_and_check_megatron_bert_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_megatron_bert_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MegatronBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_megatron_bert_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MegatronBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_megatron_bert_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MegatronBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MegatronBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
MegatronBertModel,
MegatronBertForMaskedLM,
MegatronBertForCausalLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
'''feature-extraction''': MegatronBertModel,
'''fill-mask''': MegatronBertForMaskedLM,
'''question-answering''': MegatronBertForQuestionAnswering,
'''text-classification''': MegatronBertForSequenceClassification,
'''text-generation''': MegatronBertForCausalLM,
'''token-classification''': MegatronBertForTokenClassification,
'''zero-shot''': MegatronBertForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    # test_resize_embeddings = False
    test_head_masking = False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def setUp(self):
        self.model_tester = MegatronBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MegatronBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_megatron_bert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_token_classification(*config_and_inputs)
def _long_tensor(tok_lst):
    return torch.tensor(
        tok_lst,
        dtype=torch.long,
        device=torch_device,
    )


TOLERANCE = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
class MegatronBertModelIntegrationTests(unittest.TestCase):
    @slow
    @unittest.skip("Model is not available.")
    def test_inference_no_head(self):
        directory = "nvidia/megatron-bert-uncased-345m"
        if "MYDIR" in os.environ:
            directory = os.path.join(os.environ["MYDIR"], directory)
        model = MegatronBertModel.from_pretrained(directory)
        model.to(torch_device)
        model.half()
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 9, 1024))
        self.assertEqual(output.shape, expected_shape)
        expected = [-0.6040, -0.2517, -0.1025, 0.3420, -0.6758, -0.0017, -0.1089, -0.1990, 0.5728]
        for ii in range(3):
            for jj in range(3):
                a = output[0, ii, jj]
                b = expected[3 * ii + jj]
                msg = "ii={} jj={} a={} b={}".format(ii, jj, a, b)
                self.assertTrue(math.isclose(a, b, rel_tol=TOLERANCE, abs_tol=TOLERANCE), msg=msg)
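# --- Added illustration (not part of the original test) -------------------------
# The nested tolerance loop above can be collapsed into one vectorized comparison;
# `output` and `expected` are the names used in the test.
def _assert_slice_close(output, expected, tolerance=TOLERANCE):
    expected_tensor = torch.tensor(expected, dtype=torch.float32).reshape(3, 3)
    assert torch.allclose(output[0, :3, :3].float(), expected_tensor, rtol=tolerance, atol=tolerance)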
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def unwrap_schedule(scheduler, num_steps=10):
    lrs = []
    for _ in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
    return lrs


def unwrap_and_save_reload_schedule(scheduler, num_steps=10):
    lrs = []
    for step in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
        if step == num_steps // 2:
            with tempfile.TemporaryDirectory() as tmpdirname:
                file_name = os.path.join(tmpdirname, "schedule.bin")
                torch.save(scheduler.state_dict(), file_name)

                state_dict = torch.load(file_name)
                scheduler.load_state_dict(state_dict)
    return lrs
@require_torch
class OptimizationTest(unittest.TestCase):
    def assertListAlmostEqual(self, list1, list2, tol):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol)

    def test_adam_w(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = AdamW(params=[w], lr=2e-1, weight_decay=0.0)
        for _ in range(100):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)

    def test_adafactor(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = Adafactor(
            params=[w],
            lr=1e-2,
            eps=(1e-30, 1e-3),
            clip_threshold=1.0,
            decay_rate=-0.8,
            beta1=None,
            weight_decay=0.0,
            relative_step=False,
            scale_parameter=False,
            warmup_init=False,
        )
        for _ in range(1000):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)
@require_torch
class ScheduleInitTest(unittest.TestCase):
    m = nn.Linear(50, 50) if is_torch_available() else None
    optimizer = AdamW(m.parameters(), lr=10.0) if is_torch_available() else None
    num_steps = 10

    def assertListAlmostEqual(self, list1, list2, tol, msg=None):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol, msg=msg)

    def test_schedulers(self):
        common_kwargs = {"num_warmup_steps": 2, "num_training_steps": 10}
        # schedulers dict format
        # function: (sched_args_dict, expected_learning_rates)
        scheds = {
get_constant_schedule: ({}, [10.0] * self.num_steps),
get_constant_schedule_with_warmup: (
{"num_warmup_steps": 4},
[0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
),
get_linear_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
),
get_cosine_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
),
get_cosine_with_hard_restarts_schedule_with_warmup: (
{**common_kwargs, "num_cycles": 2},
[0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
),
get_polynomial_decay_schedule_with_warmup: (
{**common_kwargs, "power": 2.0, "lr_end": 1E-7},
[0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
),
get_inverse_sqrt_schedule: (
{"num_warmup_steps": 2},
[0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
),
}
        for scheduler_func, data in scheds.items():
            kwargs, expected_learning_rates = data

            scheduler = scheduler_func(self.optimizer, **kwargs)
            self.assertEqual(len([scheduler.get_lr()[0]]), 1)
            lrs_1 = unwrap_schedule(scheduler, self.num_steps)
            self.assertListAlmostEqual(
                lrs_1,
                expected_learning_rates,
                tol=1e-2,
                msg=f"failed for {scheduler_func} in normal scheduler",
            )

            scheduler = scheduler_func(self.optimizer, **kwargs)
            if scheduler_func.__name__ != "get_constant_schedule":
                LambdaScheduleWrapper.wrap_scheduler(scheduler)  # wrap to test picklability of the schedule
            lrs_2 = unwrap_and_save_reload_schedule(scheduler, self.num_steps)
            self.assertListEqual(lrs_1, lrs_2, msg=f"failed for {scheduler_func} in save and reload")
class LambdaScheduleWrapper:
    """Wraps an lr lambda so the resulting schedule stays picklable."""

    def __init__(self, fn):
        self.fn = fn

    def __call__(self, *args, **kwargs):
        return self.fn(*args, **kwargs)

    @classmethod
    def wrap_scheduler(cls, scheduler):
        scheduler.lr_lambdas = list(map(cls, scheduler.lr_lambdas))
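# --- Added sketch (not part of the original test file) ---------------------------
# Typical training-loop wiring for one of the schedules tested above; the
# dimensions and hyper-parameters are illustrative only.
def _demo_linear_schedule():
    model = nn.Linear(4, 4)
    optimizer = AdamW(model.parameters(), lr=1e-3)
    scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=2, num_training_steps=10)
    for _ in range(10):
        optimizer.step()  # optimizer first, then scheduler
        scheduler.step()
    return scheduler.get_last_lr()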
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def custom_convert_ldm_vae_checkpoint(checkpoint, config):
    vae_state_dict = checkpoint

    new_checkpoint = {}

    new_checkpoint["encoder.conv_in.weight"] = vae_state_dict["encoder.conv_in.weight"]
    new_checkpoint["encoder.conv_in.bias"] = vae_state_dict["encoder.conv_in.bias"]
    new_checkpoint["encoder.conv_out.weight"] = vae_state_dict["encoder.conv_out.weight"]
    new_checkpoint["encoder.conv_out.bias"] = vae_state_dict["encoder.conv_out.bias"]
    new_checkpoint["encoder.conv_norm_out.weight"] = vae_state_dict["encoder.norm_out.weight"]
    new_checkpoint["encoder.conv_norm_out.bias"] = vae_state_dict["encoder.norm_out.bias"]

    new_checkpoint["decoder.conv_in.weight"] = vae_state_dict["decoder.conv_in.weight"]
    new_checkpoint["decoder.conv_in.bias"] = vae_state_dict["decoder.conv_in.bias"]
    new_checkpoint["decoder.conv_out.weight"] = vae_state_dict["decoder.conv_out.weight"]
    new_checkpoint["decoder.conv_out.bias"] = vae_state_dict["decoder.conv_out.bias"]
    new_checkpoint["decoder.conv_norm_out.weight"] = vae_state_dict["decoder.norm_out.weight"]
    new_checkpoint["decoder.conv_norm_out.bias"] = vae_state_dict["decoder.norm_out.bias"]

    new_checkpoint["quant_conv.weight"] = vae_state_dict["quant_conv.weight"]
    new_checkpoint["quant_conv.bias"] = vae_state_dict["quant_conv.bias"]
    new_checkpoint["post_quant_conv.weight"] = vae_state_dict["post_quant_conv.weight"]
    new_checkpoint["post_quant_conv.bias"] = vae_state_dict["post_quant_conv.bias"]

    # Retrieves the keys for the encoder down blocks only
    num_down_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "encoder.down" in layer})
    down_blocks = {
        layer_id: [key for key in vae_state_dict if f"down.{layer_id}" in key] for layer_id in range(num_down_blocks)
    }

    # Retrieves the keys for the decoder up blocks only
    num_up_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "decoder.up" in layer})
    up_blocks = {
        layer_id: [key for key in vae_state_dict if f"up.{layer_id}" in key] for layer_id in range(num_up_blocks)
    }

    for i in range(num_down_blocks):
        resnets = [key for key in down_blocks[i] if f"down.{i}" in key and f"down.{i}.downsample" not in key]

        if f"encoder.down.{i}.downsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.weight"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.weight"
            )
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.bias"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.bias"
            )

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"down.{i}.block", "new": f"down_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_resnets = [key for key in vae_state_dict if "encoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"encoder.mid.block_{i}" in key]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_attentions = [key for key in vae_state_dict if "encoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)

    for i in range(num_up_blocks):
        block_id = num_up_blocks - 1 - i
        resnets = [
            key for key in up_blocks[block_id] if f"up.{block_id}" in key and f"up.{block_id}.upsample" not in key
        ]

        if f"decoder.up.{block_id}.upsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.weight"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.weight"
            ]
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.bias"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.bias"
            ]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"up.{block_id}.block", "new": f"up_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_resnets = [key for key in vae_state_dict if "decoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"decoder.mid.block_{i}" in key]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_attentions = [key for key in vae_state_dict if "decoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)
    return new_checkpoint
def vae_pt_to_vae_diffuser(
    checkpoint_path: str,
    output_path: str,
):
    # Only support V1; fetch the reference LDM config.
    r = requests.get(
        "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml"
    )
    io_obj = io.BytesIO(r.content)

    original_config = OmegaConf.load(io_obj)
    image_size = 512
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if checkpoint_path.endswith("safetensors"):
        from safetensors import safe_open

        checkpoint = {}
        with safe_open(checkpoint_path, framework="pt", device="cpu") as f:
            for key in f.keys():
                checkpoint[key] = f.get_tensor(key)
    else:
        checkpoint = torch.load(checkpoint_path, map_location=device)["state_dict"]

    # Convert the VAE model.
    vae_config = create_vae_diffusers_config(original_config, image_size=image_size)
    converted_vae_checkpoint = custom_convert_ldm_vae_checkpoint(checkpoint, vae_config)

    vae = AutoencoderKL(**vae_config)
    vae.load_state_dict(converted_vae_checkpoint)
    vae.save_pretrained(output_path)
if __name__ == "__main__":
__A =argparse.ArgumentParser()
parser.add_argument("--vae_pt_path", default=None, type=str, required=True, help="Path to the VAE.pt to convert.")
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the VAE.pt to convert.")
__A =parser.parse_args()
vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
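# --- Added usage note (not part of the original script; paths are hypothetical) --
#
#   python convert_vae_pt_to_diffusers.py --vae_pt_path ./vae.pt --dump_path ./vae_diffusers
#
# which is equivalent to calling, from Python:
#
#   vae_pt_to_vae_diffuser("./vae.pt", "./vae_diffusers")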
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class AutomaticSpeechRecognition(TaskTemplate):
    task: str = field(default="automatic-speech-recognition", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"transcription": Value("string")})
    audio_column: str = "audio"
    transcription_column: str = "transcription"

    def align_with_features(self, features):
        if self.audio_column not in features:
            raise ValueError(f"Column {self.audio_column} is not present in features.")
        if not isinstance(features[self.audio_column], Audio):
            raise ValueError(f"Column {self.audio_column} is not an Audio type.")
        task_template = copy.deepcopy(self)
        input_schema = self.input_schema.copy()
        input_schema["audio"] = features[self.audio_column]
        task_template.__dict__["input_schema"] = input_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.audio_column: "audio", self.transcription_column: "transcription"}
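# --- Added sketch (not part of the original module) -----------------------------
def _demo_align_with_features() -> None:
    # How `align_with_features` is typically exercised; the 16 kHz sampling rate
    # is an arbitrary example value.
    features = Features({"audio": Audio(sampling_rate=16_000), "transcription": Value("string")})
    aligned = AutomaticSpeechRecognition().align_with_features(features)
    assert aligned.input_schema["audio"].sampling_rate == 16_000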
def selection_sort(collection: list) -> list:
    """Sort a mutable collection in ascending order, in place."""
    length = len(collection)
    for i in range(length - 1):
        least = i
        for k in range(i + 1, length):
            if collection[k] < collection[least]:
                least = k
        if least != i:
            collection[least], collection[i] = (collection[i], collection[least])
    return collection


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(selection_sort(unsorted))
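# --- Added check (not part of the original module) ------------------------------
def _demo_selection_sort() -> None:
    # Selection sort agrees with Python's built-in sorted() on random data.
    import random

    data = [random.randint(-100, 100) for _ in range(50)]
    assert selection_sort(list(data)) == sorted(data)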
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class LogitsProcessorTest(unittest.TestCase):
    def _get_uniform_logits(self, batch_size: int, length: int):
        scores = jnp.ones((batch_size, length)) / length
        return scores

    def test_temperature_dist_warper(self):
        input_ids = None
        length = 20

        scores = self._get_uniform_logits(batch_size=2, length=length)

        # tweak scores to not be uniform anymore
        scores = scores.at[1, 5].set((1 / length) + 0.1)  # peak, 1st batch
        scores = scores.at[1, 10].set((1 / length) - 0.4)  # valley, 1st batch

        # compute softmax
        probs = jax.nn.softmax(scores, axis=-1)

        temp_dist_warper_sharper = FlaxTemperatureLogitsWarper(temperature=0.5)
        temp_dist_warper_smoother = FlaxTemperatureLogitsWarper(temperature=1.3)

        warped_prob_sharp = jax.nn.softmax(temp_dist_warper_sharper(input_ids, scores.copy(), cur_len=None), axis=-1)
        warped_prob_smooth = jax.nn.softmax(temp_dist_warper_smoother(input_ids, scores.copy(), cur_len=None), axis=-1)

        # uniform distribution stays uniform
        self.assertTrue(jnp.allclose(probs[0, :], warped_prob_sharp[0, :], atol=1e-3))
        self.assertTrue(jnp.allclose(probs[0, :], warped_prob_smooth[0, :], atol=1e-3))

        # sharp peaks get higher, valleys get lower
        self.assertLess(probs[1, :].max(), warped_prob_sharp[1, :].max())
        self.assertGreater(probs[1, :].min(), warped_prob_sharp[1, :].min())

        # smooth peaks get lower, valleys get higher
        self.assertGreater(probs[1, :].max(), warped_prob_smooth[1, :].max())
        self.assertLess(probs[1, :].min(), warped_prob_smooth[1, :].min())
    def test_top_k_dist_warper(self):
        input_ids = None
        vocab_size = 10
        batch_size = 2

        # create ramp distribution
        ramp_logits = np.broadcast_to(np.arange(vocab_size)[None, :], (batch_size, vocab_size)).copy()
        ramp_logits[1:, : vocab_size // 2] = ramp_logits[1:, : vocab_size // 2] + vocab_size

        top_k_warp = FlaxTopKLogitsWarper(3)

        scores = top_k_warp(input_ids, ramp_logits, cur_len=None)

        # check that correct tokens are filtered
        self.assertListEqual(jnp.isinf(scores[0]).tolist(), 7 * [True] + 3 * [False])
        self.assertListEqual(jnp.isinf(scores[1]).tolist(), 2 * [True] + 3 * [False] + 5 * [True])

        # check special case
        length = 5
        top_k_warp_safety_check = FlaxTopKLogitsWarper(top_k=1, filter_value=0.0, min_tokens_to_keep=3)

        ramp_logits = np.broadcast_to(np.arange(length)[None, :], (batch_size, length)).copy()
        scores = top_k_warp_safety_check(input_ids, ramp_logits, cur_len=None)

        # min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
        self.assertListEqual((scores == 0.0).sum(axis=-1).tolist(), [2, 2])
    def test_top_p_dist_warper(self):
        input_ids = None
        vocab_size = 10
        batch_size = 2

        # create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
        dist = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]]))

        top_p_warp = FlaxTopPLogitsWarper(0.8)
        filtered_dist = np.exp(top_p_warp(input_ids, dist, cur_len=None))

        # dist should be filtered to keep min num values so that sum is >= top_p
        # exp (-inf) => 0
        EXPECTED_FILTERED_DIST = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]])
        self.assertTrue(np.allclose(filtered_dist, EXPECTED_FILTERED_DIST, atol=1e-3))

        # check edge cases with negative and extreme logits
        ramp_logits = np.broadcast_to(np.arange(vocab_size)[None, :], (batch_size, vocab_size)).copy() - (
            vocab_size // 2
        )

        # make ramp_logits more extreme
        ramp_logits[1] = ramp_logits[1] * 100.0

        # make sure at least 2 tokens are kept
        top_p_warp = FlaxTopPLogitsWarper(0.9, min_tokens_to_keep=2, filter_value=0.0)
        filtered_dist = top_p_warp(input_ids, ramp_logits, cur_len=None)

        # first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
        self.assertListEqual((filtered_dist != 0.0).sum(axis=-1).tolist(), [3, 2])
    def test_min_length_dist_processor(self):
        vocab_size = 20
        batch_size = 4
        eos_token_id = 0

        min_dist_processor = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)

        # check that min length is applied at length 5
        input_ids = ids_tensor((batch_size, 20), vocab_size=20)
        cur_len = 5
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_before_min_length = min_dist_processor(input_ids, scores, cur_len=cur_len)
        self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist(), 4 * [-float("inf")])

        # check that min length is not applied anymore at length 15
        scores = self._get_uniform_logits(batch_size, vocab_size)
        cur_len = 15
        scores_before_min_length = min_dist_processor(input_ids, scores, cur_len=cur_len)
        self.assertFalse(jnp.isinf(scores_before_min_length).any())
    def test_forced_bos_token_logits_processor(self):
        vocab_size = 20
        batch_size = 4
        bos_token_id = 0

        logits_processor = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)

        # check that all scores are -inf except the bos_token_id score
        input_ids = ids_tensor((batch_size, 1), vocab_size=20)
        cur_len = 1
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :]).all())
        self.assertListEqual(scores[:, bos_token_id].tolist(), 4 * [0])  # score for bos_token_id should be zero

        # check that bos_token_id is not forced if current length is greater than 1
        cur_len = 3
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertFalse(jnp.isinf(scores).any())
    def test_forced_eos_token_logits_processor(self):
        vocab_size = 20
        batch_size = 4
        eos_token_id = 0
        max_length = 5

        logits_processor = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)

        # check that all scores are -inf except the eos_token_id when max_length is reached
        input_ids = ids_tensor((batch_size, 4), vocab_size=20)
        cur_len = 4
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :]).all())
        self.assertListEqual(scores[:, eos_token_id].tolist(), 4 * [0])  # score for eos_token_id should be zero

        # check that eos_token_id is not forced if max_length is not reached
        cur_len = 3
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertFalse(jnp.isinf(scores).any())
    def test_processor_list(self):
        batch_size = 4
        sequence_length = 10
        vocab_size = 15
        eos_token_id = 2
        bos_token_id = 1
        max_length = 15

        # dummy input_ids and scores
        input_ids = ids_tensor((batch_size, sequence_length), vocab_size)
        input_ids_comp = input_ids.copy()

        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_comp = scores.copy()

        # instantiate all dist processors
        temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5)
        top_k_warp = FlaxTopKLogitsWarper(3)
        top_p_warp = FlaxTopPLogitsWarper(0.8)

        # instantiate all logits processors
        min_dist_proc = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)
        bos_dist_proc = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)
        eos_dist_proc = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)

        cur_len = 10

        # no processor list
        scores = temp_dist_warp(input_ids, scores, cur_len=cur_len)
        scores = top_k_warp(input_ids, scores, cur_len=cur_len)
        scores = top_p_warp(input_ids, scores, cur_len=cur_len)
        scores = min_dist_proc(input_ids, scores, cur_len=cur_len)
        scores = bos_dist_proc(input_ids, scores, cur_len=cur_len)
        scores = eos_dist_proc(input_ids, scores, cur_len=cur_len)

        # with processor list
        processor = FlaxLogitsProcessorList(
            [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc]
        )
        scores_comp = processor(input_ids, scores_comp, cur_len=cur_len)

        # scores should be equal
        self.assertTrue(jnp.allclose(scores, scores_comp, atol=1e-3))

        # input_ids should never be changed
        self.assertListEqual(input_ids.tolist(), input_ids_comp.tolist())
    def test_processor_list_jitted(self):
        batch_size = 4
        sequence_length = 10
        vocab_size = 15
        eos_token_id = 2
        bos_token_id = 1
        max_length = 15

        # dummy input_ids and scores
        input_ids = ids_tensor((batch_size, sequence_length), vocab_size)
        input_ids_comp = input_ids.copy()

        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_comp = scores.copy()

        # instantiate all dist processors
        temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5)
        top_k_warp = FlaxTopKLogitsWarper(3)
        top_p_warp = FlaxTopPLogitsWarper(0.8)

        # instantiate all logits processors
        min_dist_proc = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)
        bos_dist_proc = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)
        eos_dist_proc = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)

        cur_len = 10

        # no processor list
        def run_no_processor_list(input_ids, scores, cur_len):
            scores = temp_dist_warp(input_ids, scores, cur_len=cur_len)
            scores = top_k_warp(input_ids, scores, cur_len=cur_len)
            scores = top_p_warp(input_ids, scores, cur_len=cur_len)
            scores = min_dist_proc(input_ids, scores, cur_len=cur_len)
            scores = bos_dist_proc(input_ids, scores, cur_len=cur_len)
            scores = eos_dist_proc(input_ids, scores, cur_len=cur_len)
            return scores

        # with processor list
        def run_processor_list(input_ids, scores, cur_len):
            processor = FlaxLogitsProcessorList(
                [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc]
            )
            scores = processor(input_ids, scores, cur_len=cur_len)
            return scores

        jitted_run_no_processor_list = jax.jit(run_no_processor_list)
        jitted_run_processor_list = jax.jit(run_processor_list)

        scores = jitted_run_no_processor_list(input_ids, scores, cur_len)
        scores_comp = jitted_run_processor_list(input_ids, scores_comp, cur_len)

        # scores should be equal
        self.assertTrue(jnp.allclose(scores, scores_comp, atol=1e-3))

        # input_ids should never be changed
        self.assertListEqual(input_ids.tolist(), input_ids_comp.tolist())
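# --- Added sketch (not part of the original test file) ---------------------------
# Composing the same warpers outside of unittest; shapes and hyper-parameters
# below are illustrative only.
def _demo_processor_list():
    processors = FlaxLogitsProcessorList(
        [
            FlaxTemperatureLogitsWarper(temperature=0.7),
            FlaxTopKLogitsWarper(top_k=50),
            FlaxTopPLogitsWarper(top_p=0.95),
        ]
    )
    input_ids = jnp.zeros((1, 8), dtype="i4")
    scores = jnp.zeros((1, 64))  # dummy logits over a 64-token vocabulary
    return processors(input_ids, scores, cur_len=8)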
from __future__ import annotations

from collections.abc import Callable


class Heap:
    """A generic heap (max-heap by default); pass a custom `key` to change the ordering."""

    def __init__(self, key: Callable | None = None) -> None:
        # Stores actual heap items.
        self.arr: list = []
        # Stores indexes of each item for supporting updates and deletion.
        self.pos_map: dict = {}
        # Stores current size of heap.
        self.size = 0
        # Stores function used to evaluate the score of an item on which basis ordering
        # will be done.
        self.key: Callable = key or (lambda x: x)

    def _parent(self, i: int) -> int | None:
        """Returns parent index of given index if it exists, else None."""
        return int((i - 1) / 2) if i > 0 else None

    def _left(self, i: int) -> int | None:
        """Returns left-child index of given index if it exists, else None."""
        left = int(2 * i + 1)
        return left if 0 < left < self.size else None

    def _right(self, i: int) -> int | None:
        """Returns right-child index of given index if it exists, else None."""
        right = int(2 * i + 2)
        return right if 0 < right < self.size else None

    def _swap(self, i: int, j: int) -> None:
        """Swaps the items at indexes i and j, keeping the position map in sync."""
        # First update the indexes of the items in index map.
        self.pos_map[self.arr[i][0]], self.pos_map[self.arr[j][0]] = (
            self.pos_map[self.arr[j][0]],
            self.pos_map[self.arr[i][0]],
        )
        # Then swap the items in the list.
        self.arr[i], self.arr[j] = self.arr[j], self.arr[i]
    def _cmp(self, i: int, j: int) -> bool:
        """Compares the stored scores of the items at indexes i and j."""
        return self.arr[i][1] < self.arr[j][1]

    def _get_valid_parent(self, i: int) -> int:
        """Returns the index of a valid parent as per the desired ordering among
        the given index and both of its children."""
        left = self._left(i)
        right = self._right(i)
        valid_parent = i

        if left is not None and not self._cmp(left, valid_parent):
            valid_parent = left
        if right is not None and not self._cmp(right, valid_parent):
            valid_parent = right

        return valid_parent

    def _heapify_up(self, index: int) -> None:
        """Fixes the heap in the upward direction from the given index."""
        parent = self._parent(index)
        while parent is not None and not self._cmp(index, parent):
            self._swap(index, parent)
            index, parent = parent, self._parent(parent)

    def _heapify_down(self, index: int) -> None:
        """Fixes the heap in the downward direction from the given index."""
        valid_parent = self._get_valid_parent(index)
        while valid_parent != index:
            self._swap(index, valid_parent)
            index, valid_parent = valid_parent, self._get_valid_parent(valid_parent)
    def update_item(self, item: int, item_value: int) -> None:
        """Updates the value of the given item in the heap if present."""
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        self.arr[index] = [item, self.key(item_value)]
        # Make sure heap is right in both up and down direction.
        # Ideally only one of them will make any change.
        self._heapify_up(index)
        self._heapify_down(index)

    def delete_item(self, item: int) -> None:
        """Deletes the given item from the heap if present."""
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        del self.pos_map[item]
        self.arr[index] = self.arr[self.size - 1]
        self.pos_map[self.arr[self.size - 1][0]] = index
        self.size -= 1
        # Make sure heap is right in both up and down direction. Ideally only one
        # of them will make any change- so no performance loss in calling both.
        if self.size > index:
            self._heapify_up(index)
            self._heapify_down(index)

    def insert_item(self, item: int, item_value: int) -> None:
        """Inserts the given item with the given value into the heap."""
        arr_len = len(self.arr)
        if arr_len == self.size:
            self.arr.append([item, self.key(item_value)])
        else:
            self.arr[self.size] = [item, self.key(item_value)]
        self.pos_map[item] = self.size
        self.size += 1
        self._heapify_up(self.size - 1)

    def get_top(self) -> list | None:
        """Returns the top item of the heap if the heap is not empty."""
        return self.arr[0] if self.size else None

    def extract_top(self) -> list | None:
        """Returns the top item of the heap and removes it, if the heap is not empty."""
        top_item_tuple = self.get_top()
        if top_item_tuple:
            self.delete_item(top_item_tuple[0])
        return top_item_tuple


def test_heap() -> None:
    # The doctests of the original module were elided in this copy.
    pass


if __name__ == "__main__":
    import doctest

    doctest.testmod()
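# --- Added usage sketch (not part of the original module) ------------------------
def _demo_heap() -> None:
    # By default the comparison keeps the largest stored value on top.
    max_heap = Heap()
    for item, value in [(1, 5), (2, 1), (3, 3)]:
        max_heap.insert_item(item, value)
    assert max_heap.get_top() == [1, 5]

    # A negating key turns it into a min-heap (values are stored negated).
    min_heap = Heap(key=lambda x: -x)
    for item, value in [(1, 5), (2, 1), (3, 3)]:
        min_heap.insert_item(item, value)
    assert min_heap.get_top() == [2, -1]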
import math
import sys
def minimum_squares_to_represent_a_number(number: int) -> int:
    """Count the minimum number of perfect squares whose sum equals `number`."""
    if number != int(number):
        raise ValueError("the value of input must be a natural number")
    if number < 0:
        raise ValueError("the value of input must not be a negative number")
    if number == 0:
        return 1
    answers = [-1] * (number + 1)
    answers[0] = 0
    for i in range(1, number + 1):
        answer = sys.maxsize
        root = int(math.sqrt(i))
        for j in range(1, root + 1):
            current_answer = 1 + answers[i - (j**2)]
            answer = min(answer, current_answer)
        answers[i] = answer
    return answers[number]
if __name__ == "__main__":
import doctest
doctest.testmod()
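# --- Added examples (not part of the original module) ----------------------------
# By Lagrange's four-square theorem the answer is always at most 4.
def _demo_minimum_squares() -> None:
    assert minimum_squares_to_represent_a_number(12) == 3  # 4 + 4 + 4
    assert minimum_squares_to_represent_a_number(13) == 2  # 4 + 9
    assert minimum_squares_to_represent_a_number(25) == 1  # 25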
def snake_to_camel_case(input_str: str, use_pascal: bool = False) -> str:
    """Convert a snake_case string to camelCase (or PascalCase if use_pascal is True)."""
    if not isinstance(input_str, str):
        msg = f"Expected string as input, found {type(input_str)}"
        raise ValueError(msg)
    if not isinstance(use_pascal, bool):
        msg = f"Expected boolean as use_pascal parameter, found {type(use_pascal)}"
        raise ValueError(msg)
    words = input_str.split("_")
    start_index = 0 if use_pascal else 1
    words_to_capitalize = words[start_index:]
    capitalized_words = [word[0].upper() + word[1:] for word in words_to_capitalize]
    initial_word = "" if use_pascal else words[0]
    return "".join([initial_word, *capitalized_words])
if __name__ == "__main__":
from doctest import testmod
testmod()
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_wavlm": ["WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "WavLMConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_wavlm"] = [
"""WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""WavLMForAudioFrameClassification""",
"""WavLMForCTC""",
"""WavLMForSequenceClassification""",
"""WavLMForXVector""",
"""WavLMModel""",
"""WavLMPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavlm import (
WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
WavLMForAudioFrameClassification,
WavLMForCTC,
WavLMForSequenceClassification,
WavLMForXVector,
WavLMModel,
WavLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
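# --- Added note (not part of the original __init__) ------------------------------
# With the lazy module in place, the heavy torch modules are only imported on
# first attribute access, e.g.:
#
#   from transformers import WavLMConfig, WavLMModel
#   model = WavLMModel(WavLMConfig())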
"""simple docstring"""
import inspect
import unittest
from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "neck_hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))
class MobileViTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        last_hidden_size=640,
        num_attention_heads=4,
        hidden_act="silu",
        conv_kernel_size=3,
        output_stride=32,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.last_hidden_size = last_hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
def _UpperCamelCase ( self ) -> int:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__SCREAMING_SNAKE_CASE :List[str] = None
__SCREAMING_SNAKE_CASE :List[Any] = None
if self.use_labels:
__SCREAMING_SNAKE_CASE :str = ids_tensor([self.batch_size] ,self.num_labels )
__SCREAMING_SNAKE_CASE :Tuple = ids_tensor([self.batch_size, self.image_size, self.image_size] ,self.num_labels )
__SCREAMING_SNAKE_CASE :List[Any] = self.get_config()
return config, pixel_values, labels, pixel_labels
def _UpperCamelCase ( self ) -> List[Any]:
"""simple docstring"""
return MobileViTConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,num_attention_heads=self.num_attention_heads ,hidden_act=self.hidden_act ,conv_kernel_size=self.conv_kernel_size ,output_stride=self.output_stride ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,classifier_dropout_prob=self.classifier_dropout_prob ,initializer_range=self.initializer_range ,)
def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ) -> Dict:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :List[Any] = MobileViTModel(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__SCREAMING_SNAKE_CASE :Tuple = model(lowerCAmelCase__ )
self.parent.assertEqual(
result.last_hidden_state.shape ,(
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) ,)
def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ) -> List[str]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :List[Any] = self.num_labels
__SCREAMING_SNAKE_CASE :str = MobileViTForImageClassification(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__SCREAMING_SNAKE_CASE :Dict = model(lowerCAmelCase__ ,labels=lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ) -> Optional[int]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :Any = self.num_labels
__SCREAMING_SNAKE_CASE :List[Any] = MobileViTForSemanticSegmentation(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__SCREAMING_SNAKE_CASE :Union[str, Any] = model(lowerCAmelCase__ )
self.parent.assertEqual(
result.logits.shape ,(
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) ,)
__SCREAMING_SNAKE_CASE :List[str] = model(lowerCAmelCase__ ,labels=lowerCAmelCase__ )
self.parent.assertEqual(
result.logits.shape ,(
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) ,)
def _UpperCamelCase ( self ) -> Dict:
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MobileViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileViTModel,
            "image-classification": MobileViTForImageClassification,
            "image-segmentation": MobileViTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self):
        self.model_tester = MobileViTModelTester(self)
        self.config_tester = MobileViTConfigTester(self, config_class=MobileViTConfig, has_text_modality=False)
    def test_config(self):
        self.config_tester.run_common_tests()
@unittest.skip(reason='''MobileViT does not use inputs_embeds''' )
def _UpperCamelCase ( self ) -> List[Any]:
"""simple docstring"""
pass
@unittest.skip(reason='''MobileViT does not support input and output embeddings''' )
def _UpperCamelCase ( self ) -> Dict:
"""simple docstring"""
pass
@unittest.skip(reason='''MobileViT does not output attentions''' )
def _UpperCamelCase ( self ) -> int:
"""simple docstring"""
pass
def _UpperCamelCase ( self ) -> List[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__SCREAMING_SNAKE_CASE :List[str] = model_class(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE :Optional[int] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__SCREAMING_SNAKE_CASE :List[Any] = [*signature.parameters.keys()]
__SCREAMING_SNAKE_CASE :List[str] = ["pixel_values"]
self.assertListEqual(arg_names[:1] ,lowerCAmelCase__ )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def _UpperCamelCase ( self ) -> str:
"""simple docstring"""
pass
def _UpperCamelCase ( self ) -> int:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase__ )
def _UpperCamelCase ( self ) -> Optional[Any]:
"""simple docstring"""
def check_hidden_states_output(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ):
__SCREAMING_SNAKE_CASE :Dict = model_class(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
with torch.no_grad():
__SCREAMING_SNAKE_CASE :Union[str, Any] = model(**self._prepare_for_class(lowerCAmelCase__ ,lowerCAmelCase__ ) )
__SCREAMING_SNAKE_CASE :Optional[int] = outputs.hidden_states
__SCREAMING_SNAKE_CASE :Any = 5
self.assertEqual(len(lowerCAmelCase__ ) ,lowerCAmelCase__ )
# MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
__SCREAMING_SNAKE_CASE :Optional[Any] = 2
for i in range(len(lowerCAmelCase__ ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ) ,[self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] ,)
divisor *= 2
self.assertEqual(self.model_tester.output_stride ,divisor // 2 )
__SCREAMING_SNAKE_CASE :List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__SCREAMING_SNAKE_CASE :Dict = True
check_hidden_states_output(lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__SCREAMING_SNAKE_CASE :Optional[int] = True
check_hidden_states_output(lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ )
def _UpperCamelCase ( self ) -> int:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase__ )
def _UpperCamelCase ( self ) -> Optional[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*lowerCAmelCase__ )
@slow
def _UpperCamelCase ( self ) -> Dict:
"""simple docstring"""
for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__SCREAMING_SNAKE_CASE :Optional[int] = MobileViTModel.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class MobileViTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return MobileViTImageProcessor.from_pretrained("apple/mobilevit-xx-small") if is_vision_available() else None
@slow
    def test_inference_image_classification_head(self):
__SCREAMING_SNAKE_CASE :int = MobileViTForImageClassification.from_pretrained('''apple/mobilevit-xx-small''' ).to(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE :int = self.default_image_processor
__SCREAMING_SNAKE_CASE :Union[str, Any] = prepare_img()
__SCREAMING_SNAKE_CASE :List[str] = image_processor(images=lowerCAmelCase__ ,return_tensors='''pt''' ).to(lowerCAmelCase__ )
# forward pass
with torch.no_grad():
__SCREAMING_SNAKE_CASE :int = model(**lowerCAmelCase__ )
# verify the logits
__SCREAMING_SNAKE_CASE :Union[str, Any] = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape ,lowerCAmelCase__ )
        expected_slice = torch.tensor([-1.9364, -1.2327, -0.4653]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
@slow
    def test_inference_semantic_segmentation(self):
__SCREAMING_SNAKE_CASE :Optional[Any] = MobileViTForSemanticSegmentation.from_pretrained('''apple/deeplabv3-mobilevit-xx-small''' )
__SCREAMING_SNAKE_CASE :Optional[Any] = model.to(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE :List[Any] = MobileViTImageProcessor.from_pretrained('''apple/deeplabv3-mobilevit-xx-small''' )
__SCREAMING_SNAKE_CASE :Union[str, Any] = prepare_img()
__SCREAMING_SNAKE_CASE :Any = image_processor(images=lowerCAmelCase__ ,return_tensors='''pt''' ).to(lowerCAmelCase__ )
# forward pass
with torch.no_grad():
__SCREAMING_SNAKE_CASE :Optional[int] = model(**lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE :Union[str, Any] = outputs.logits
# verify the logits
__SCREAMING_SNAKE_CASE :Union[str, Any] = torch.Size((1, 21, 32, 32) )
self.assertEqual(logits.shape ,lowerCAmelCase__ )
        expected_slice = torch.tensor(
            [
                [[6.9713, 6.9786, 7.2422], [7.2893, 7.2825, 7.4446], [7.6580, 7.8797, 7.9420]],
                [[-10.6869, -10.3250, -10.3471], [-10.4228, -9.9868, -9.7132], [-11.0405, -11.0221, -10.7318]],
                [[-3.3089, -2.8539, -2.6740], [-3.2706, -2.5621, -2.5108], [-3.2534, -2.6615, -2.6651]],
            ],
            device=torch_device,
        )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))
@slow
    def test_post_processing_semantic_segmentation(self):
__SCREAMING_SNAKE_CASE :List[str] = MobileViTForSemanticSegmentation.from_pretrained('''apple/deeplabv3-mobilevit-xx-small''' )
__SCREAMING_SNAKE_CASE :Dict = model.to(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE :Union[str, Any] = MobileViTImageProcessor.from_pretrained('''apple/deeplabv3-mobilevit-xx-small''' )
__SCREAMING_SNAKE_CASE :Dict = prepare_img()
__SCREAMING_SNAKE_CASE :Optional[Any] = image_processor(images=lowerCAmelCase__ ,return_tensors='''pt''' ).to(lowerCAmelCase__ )
# forward pass
with torch.no_grad():
__SCREAMING_SNAKE_CASE :Union[str, Any] = model(**lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE :int = outputs.logits.detach().cpu()
__SCREAMING_SNAKE_CASE :Any = image_processor.post_process_semantic_segmentation(outputs=lowerCAmelCase__ ,target_sizes=[(50, 60)] )
__SCREAMING_SNAKE_CASE :Tuple = torch.Size((50, 60) )
self.assertEqual(segmentation[0].shape ,lowerCAmelCase__ )
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((32, 32))
        self.assertEqual(segmentation[0].shape, expected_shape)
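# The integration checks above pin exact logits and segmentation shapes; they are
# gated behind @slow, so (assuming the standard transformers checkout layout) they
# run with something like:
#
#   RUN_SLOW=1 pytest tests/models/mobilevit/test_modeling_mobilevit.py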
import unittest
from transformers import (
    MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
    TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
    Text2TextGenerationPipeline,
    pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class Text2TextGenerationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
    def get_test_pipeline(self, model, tokenizer, processor):
        generator = Text2TextGenerationPipeline(model=model, tokenizer=tokenizer)
        return generator, ["Something to write", "Something else"]
    def run_pipeline_test(self, generator, _):
        outputs = generator("Something there")
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        # These are encoder decoder, they don't just append to incoming string
        self.assertFalse(outputs[0]["generated_text"].startswith("Something there"))

        outputs = generator(["This is great !", "Something else"], num_return_sequences=2, do_sample=True)
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )

        outputs = generator(
            ["This is great !", "Something else"], num_return_sequences=2, batch_size=2, do_sample=True
        )
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )

        with self.assertRaises(ValueError):
            generator(4)
@require_torch
    def test_small_model_pt(self):
        generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random", framework="pt")
        # do_sample=False necessary for reproducibility
        outputs = generator("Something there", do_sample=False)
        self.assertEqual(outputs, [{"generated_text": ""}])

        num_return_sequences = 3
        outputs = generator(
            "Something there",
            num_return_sequences=num_return_sequences,
            num_beams=num_return_sequences,
        )
        target_outputs = [
            {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide Beide"},
            {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide"},
            {"generated_text": ""},
        ]
        self.assertEqual(outputs, target_outputs)

        outputs = generator("This is a test", do_sample=True, num_return_sequences=2, return_tensors=True)
        self.assertEqual(
            outputs,
            [
                {"generated_token_ids": ANY(torch.Tensor)},
                {"generated_token_ids": ANY(torch.Tensor)},
            ],
        )

        generator.tokenizer.pad_token_id = generator.model.config.eos_token_id
        generator.tokenizer.pad_token = "<pad>"
        outputs = generator(
            ["This is a test", "This is a second test"],
            do_sample=True,
            num_return_sequences=2,
            batch_size=2,
            return_tensors=True,
        )
        self.assertEqual(
            outputs,
            [
                [
                    {"generated_token_ids": ANY(torch.Tensor)},
                    {"generated_token_ids": ANY(torch.Tensor)},
                ],
                [
                    {"generated_token_ids": ANY(torch.Tensor)},
                    {"generated_token_ids": ANY(torch.Tensor)},
                ],
            ],
        )
@require_tf
    def test_small_model_tf(self):
        generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random", framework="tf")
        # do_sample=False necessary for reproducibility
        outputs = generator("Something there", do_sample=False)
        self.assertEqual(outputs, [{"generated_text": ""}])
# Min heap data structure with decrease-key functionality, in O(log(n)) time.


class Node:
    def __init__(self, name, val):
        self.name = name
        self.val = val

    def __str__(self):
        return f"{self.__class__.__name__}({self.name}, {self.val})"

    def __lt__(self, other):
        return self.val < other.val


class MinHeap:
    def __init__(self, array):
        self.idx_of_element = {}
        self.heap_dict = {}
        self.heap = self.build_heap(array)

    def __getitem__(self, key):
        return self.get_value(key)

    def get_parent_idx(self, idx):
        return (idx - 1) // 2

    def get_left_child_idx(self, idx):
        return idx * 2 + 1

    def get_right_child_idx(self, idx):
        return idx * 2 + 2

    def get_value(self, key):
        return self.heap_dict[key]

    def build_heap(self, array):
        last_idx = len(array) - 1
        start_from = self.get_parent_idx(last_idx)

        for idx, i in enumerate(array):
            self.idx_of_element[i] = idx
            self.heap_dict[i.name] = i.val

        # Sift down every internal node, starting from the last parent.
        for i in range(start_from, -1, -1):
            self.sift_down(i, array)
        return array

    # This is the min-heapify step.
    def sift_down(self, idx, array):
        while True:
            left = self.get_left_child_idx(idx)
            right = self.get_right_child_idx(idx)

            smallest = idx
            if left < len(array) and array[left] < array[idx]:
                smallest = left
            if right < len(array) and array[right] < array[smallest]:
                smallest = right

            if smallest != idx:
                array[idx], array[smallest] = array[smallest], array[idx]
                (
                    self.idx_of_element[array[idx]],
                    self.idx_of_element[array[smallest]],
                ) = (
                    self.idx_of_element[array[smallest]],
                    self.idx_of_element[array[idx]],
                )
                idx = smallest
            else:
                break

    def sift_up(self, idx):
        p = self.get_parent_idx(idx)
        while p >= 0 and self.heap[p] > self.heap[idx]:
            self.heap[p], self.heap[idx] = self.heap[idx], self.heap[p]
            self.idx_of_element[self.heap[p]], self.idx_of_element[self.heap[idx]] = (
                self.idx_of_element[self.heap[idx]],
                self.idx_of_element[self.heap[p]],
            )
            idx = p
            p = self.get_parent_idx(idx)

    def peek(self):
        return self.heap[0]

    def remove(self):
        # Swap the root with the last element, pop it off, then restore the heap.
        self.heap[0], self.heap[-1] = self.heap[-1], self.heap[0]
        self.idx_of_element[self.heap[0]], self.idx_of_element[self.heap[-1]] = (
            self.idx_of_element[self.heap[-1]],
            self.idx_of_element[self.heap[0]],
        )

        x = self.heap.pop()
        del self.idx_of_element[x]
        self.sift_down(0, self.heap)
        return x

    def insert(self, node):
        self.heap.append(node)
        self.idx_of_element[node] = len(self.heap) - 1
        self.heap_dict[node.name] = node.val
        self.sift_up(len(self.heap) - 1)

    def is_empty(self):
        return len(self.heap) == 0

    def decrease_key(self, node, new_value):
        assert (
            self.heap[self.idx_of_element[node]].val > new_value
        ), "newValue must be less than current value"
        node.val = new_value
        self.heap_dict[node.name] = new_value
        self.sift_up(self.idx_of_element[node])
_snake_case = Node("R", -1)
_snake_case = Node("B", 6)
_snake_case = Node("A", 3)
_snake_case = Node("X", 1)
_snake_case = Node("E", 4)
# Use one of these two ways to generate Min-Heap
# Generating Min-Heap from array
_snake_case = MinHeap([r, b, a, x, e])
# Generating Min-Heap by Insert method
# myMinHeap.insert(a)
# myMinHeap.insert(b)
# myMinHeap.insert(x)
# myMinHeap.insert(r)
# myMinHeap.insert(e)
# Before
print("Min Heap - before decrease key")
for i in my_min_heap.heap:
print(i)
print("Min Heap - After decrease key of node [B -> -17]")
my_min_heap.decrease_key(b, -17)
# After
for i in my_min_heap.heap:
print(i)
if __name__ == "__main__":
import doctest
doctest.testmod()
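# Expected output of the demo above, traced by hand through build_heap and
# decrease_key:
#
#   Min Heap - before decrease key
#   Node(R, -1)
#   Node(X, 1)
#   Node(A, 3)
#   Node(B, 6)
#   Node(E, 4)
#   Min Heap - After decrease key of node [B -> -17]
#   Node(B, -17)
#   Node(R, -1)
#   Node(A, 3)
#   Node(X, 1)
#   Node(E, 4)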
def topological_sort(graph):
    """Kahn's algorithm: topological ordering of a directed acyclic graph via BFS."""
    indegree = [0] * len(graph)
    queue = []
    topo = []
    cnt = 0

    for values in graph.values():
        for i in values:
            indegree[i] += 1

    for i in range(len(indegree)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        cnt += 1
        topo.append(vertex)
        for x in graph[vertex]:
            indegree[x] -= 1
            if indegree[x] == 0:
                queue.append(x)

    if cnt != len(graph):
        print("Cycle exists")
    else:
        print(topo)


# Adjacency List of Graph
graph = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
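# For the adjacency list above, the call prints a valid topological ordering:
#
#   [0, 1, 2, 3, 4, 5]
#
# (0 is the only vertex with indegree 0; 1 and 2 then unlock 3, which unlocks 4 and 5.)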
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)


class TimmBackboneConfig(PretrainedConfig):
    """Configuration class for a backbone loaded via the `timm` library."""

    model_type = "timm_backbone"

    def __init__(
        self,
        backbone=None,
        num_channels=3,
        features_only=True,
        use_pretrained_backbone=True,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.backbone = backbone
        self.num_channels = num_channels
        self.features_only = features_only
        self.use_pretrained_backbone = use_pretrained_backbone
        self.use_timm_backbone = True
        self.out_indices = out_indices if out_indices is not None else (-1,)
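# A minimal usage sketch for the config above (assuming an architecture name such
# as "resnet50" that the locally installed `timm` package provides):
#
#   config = TimmBackboneConfig(backbone="resnet50", out_indices=(1, 2, 3, 4))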
import argparse
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """Creates a pair of `DataLoader`s for the GLUE MRPC dataset, tokenized with bert-base-cased."""
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size, drop_last=True
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"],
        shuffle=False,
        collate_fn=collate_fn,
        batch_size=EVAL_BATCH_SIZE,
        drop_last=(accelerator.mixed_precision == "fp8"),
    )

    return train_dataloader, eval_dataloader
def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
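# This script follows the standard `accelerate` example layout; assuming it is
# saved as nlp_example.py, it can be launched on any supported setup with:
#
#   accelerate config          # answer the interactive questions once
#   accelerate launch nlp_example.py --mixed_precision fp16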
'''simple docstring'''
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
logger = logging.get_logger(__name__)


class BarkProcessor(ProcessorMixin):
    """Constructs a Bark processor which wraps a text tokenizer and optional Bark voice presets."""

    tokenizer_class = "AutoTokenizer"
    attributes = ["tokenizer"]

    preset_shape = {
        "semantic_prompt": 1,
        "coarse_prompt": 2,
        "fine_prompt": 2,
    }

    def __init__(self, tokenizer, speaker_embeddings=None):
        super().__init__(tokenizer)
        self.speaker_embeddings = speaker_embeddings
@classmethod
def __lowerCamelCase ( cls : Tuple , _lowerCAmelCase : Any , _lowerCAmelCase : Tuple="speaker_embeddings_path.json" , **_lowerCAmelCase : int):
'''simple docstring'''
if speaker_embeddings_dict_path is not None:
__lowercase =get_file_from_repo(
lowerCAmelCase__ , lowerCAmelCase__ , subfolder=kwargs.pop('subfolder' , lowerCAmelCase__) , cache_dir=kwargs.pop('cache_dir' , lowerCAmelCase__) , force_download=kwargs.pop('force_download' , lowerCAmelCase__) , proxies=kwargs.pop('proxies' , lowerCAmelCase__) , resume_download=kwargs.pop('resume_download' , lowerCAmelCase__) , local_files_only=kwargs.pop('local_files_only' , lowerCAmelCase__) , use_auth_token=kwargs.pop('use_auth_token' , lowerCAmelCase__) , revision=kwargs.pop('revision' , lowerCAmelCase__) , )
if speaker_embeddings_path is None:
logger.warning(
f"""`{os.path.join(lowerCAmelCase__ , lowerCAmelCase__)}` does not exists\n , no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json\n dictionnary if wanted, otherwise set `speaker_embeddings_dict_path=None`.""")
__lowercase =None
else:
with open(lowerCAmelCase__) as speaker_embeddings_json:
__lowercase =json.load(lowerCAmelCase__)
else:
__lowercase =None
__lowercase =AutoTokenizer.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__)
return cls(tokenizer=lowerCAmelCase__ , speaker_embeddings=lowerCAmelCase__)
def __lowerCamelCase ( self : Tuple , _lowerCAmelCase : Any , _lowerCAmelCase : int="speaker_embeddings_path.json" , _lowerCAmelCase : Optional[int]="speaker_embeddings" , _lowerCAmelCase : bool = False , **_lowerCAmelCase : Any , ):
'''simple docstring'''
if self.speaker_embeddings is not None:
os.makedirs(os.path.join(lowerCAmelCase__ , lowerCAmelCase__ , 'v2') , exist_ok=lowerCAmelCase__)
__lowercase ={}
__lowercase =save_directory
for prompt_key in self.speaker_embeddings:
if prompt_key != "repo_or_path":
__lowercase =self._load_voice_preset(lowerCAmelCase__)
__lowercase ={}
for key in self.speaker_embeddings[prompt_key]:
np.save(
os.path.join(
embeddings_dict['repo_or_path'] , lowerCAmelCase__ , f"""{prompt_key}_{key}""") , voice_preset[key] , allow_pickle=lowerCAmelCase__ , )
__lowercase =os.path.join(lowerCAmelCase__ , f"""{prompt_key}_{key}.npy""")
__lowercase =tmp_dict
with open(os.path.join(lowerCAmelCase__ , lowerCAmelCase__) , 'w') as fp:
json.dump(lowerCAmelCase__ , lowerCAmelCase__)
super().save_pretrained(lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__)
def __lowerCamelCase ( self : List[Any] , _lowerCAmelCase : str = None , **_lowerCAmelCase : Tuple):
'''simple docstring'''
__lowercase =self.speaker_embeddings[voice_preset]
__lowercase ={}
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset_paths:
raise ValueError(
f"""Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}].""")
__lowercase =get_file_from_repo(
self.speaker_embeddings.get('repo_or_path' , '/') , voice_preset_paths[key] , subfolder=kwargs.pop('subfolder' , lowerCAmelCase__) , cache_dir=kwargs.pop('cache_dir' , lowerCAmelCase__) , force_download=kwargs.pop('force_download' , lowerCAmelCase__) , proxies=kwargs.pop('proxies' , lowerCAmelCase__) , resume_download=kwargs.pop('resume_download' , lowerCAmelCase__) , local_files_only=kwargs.pop('local_files_only' , lowerCAmelCase__) , use_auth_token=kwargs.pop('use_auth_token' , lowerCAmelCase__) , revision=kwargs.pop('revision' , lowerCAmelCase__) , )
if path is None:
raise ValueError(
f"""`{os.path.join(self.speaker_embeddings.get('repo_or_path' , '/') , voice_preset_paths[key])}` does not exists\n , no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}\n embeddings.""")
__lowercase =np.load(lowerCAmelCase__)
return voice_preset_dict
def __lowerCamelCase ( self : Any , _lowerCAmelCase : Optional[dict] = None):
'''simple docstring'''
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset:
raise ValueError(f"""Voice preset unrecognized, missing {key} as a key.""")
if not isinstance(voice_preset[key] , np.ndarray):
raise ValueError(f"""{key} voice preset must be a {str(self.preset_shape[key])}D ndarray.""")
if len(voice_preset[key].shape) != self.preset_shape[key]:
raise ValueError(f"""{key} voice preset must be a {str(self.preset_shape[key])}D ndarray.""")
def __call__( self : Tuple , _lowerCAmelCase : Dict=None , _lowerCAmelCase : Tuple=None , _lowerCAmelCase : List[str]="pt" , _lowerCAmelCase : Union[str, Any]=2_5_6 , _lowerCAmelCase : Tuple=False , _lowerCAmelCase : str=True , _lowerCAmelCase : List[str]=False , **_lowerCAmelCase : str , ):
'''simple docstring'''
if voice_preset is not None and not isinstance(lowerCAmelCase__ , lowerCAmelCase__):
if (
isinstance(lowerCAmelCase__ , lowerCAmelCase__)
and self.speaker_embeddings is not None
and voice_preset in self.speaker_embeddings
):
__lowercase =self._load_voice_preset(lowerCAmelCase__)
else:
if isinstance(lowerCAmelCase__ , lowerCAmelCase__) and not voice_preset.endswith('.npz'):
__lowercase =voice_preset + ".npz"
__lowercase =np.load(lowerCAmelCase__)
if voice_preset is not None:
self._validate_voice_preset_dict(lowerCAmelCase__ , **lowerCAmelCase__)
__lowercase =BatchFeature(data=lowerCAmelCase__ , tensor_type=lowerCAmelCase__)
__lowercase =self.tokenizer(
lowerCAmelCase__ , return_tensors=lowerCAmelCase__ , padding='max_length' , max_length=lowerCAmelCase__ , return_attention_mask=lowerCAmelCase__ , return_token_type_ids=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , **lowerCAmelCase__ , )
if voice_preset is not None:
__lowercase =voice_preset
return encoded_text
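# A minimal usage sketch for the processor above (assuming the public Bark
# checkpoint "suno/bark-small" and one of its bundled voice presets):
#
#   processor = BarkProcessor.from_pretrained("suno/bark-small")
#   inputs = processor("Hello, my dog is cute", voice_preset="v2/en_speaker_6")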
from collections.abc import Callable
class Heap:
    """A generic Heap class; can be used as a min- or max-heap by passing the key function accordingly."""

    def __init__(self, key: Callable | None = None) -> None:
        # Stores actual heap items.
        self.arr: list = []
        # Stores indexes of each item for supporting updates and deletion.
        self.pos_map: dict = {}
        # Stores current size of heap.
        self.size = 0
        # Stores function used to evaluate the score of an item on which basis ordering
        # will be done.
        self.key = key or (lambda x: x)

    def _parent(self, i: int) -> int | None:
        """Returns parent index of given index if it exists, else None."""
        return int((i - 1) / 2) if i > 0 else None

    def _left(self, i: int) -> int | None:
        """Returns left-child index of given index if it exists, else None."""
        left = int(2 * i + 1)
        return left if 0 < left < self.size else None

    def _right(self, i: int) -> int | None:
        """Returns right-child index of given index if it exists, else None."""
        right = int(2 * i + 2)
        return right if 0 < right < self.size else None

    def _swap(self, i: int, j: int) -> None:
        # First update the indexes of the items in the index map.
        self.pos_map[self.arr[i][0]], self.pos_map[self.arr[j][0]] = (
            self.pos_map[self.arr[j][0]],
            self.pos_map[self.arr[i][0]],
        )
        # Then swap the items in the list.
        self.arr[i], self.arr[j] = self.arr[j], self.arr[i]

    def _cmp(self, i: int, j: int) -> bool:
        """Compares the two items using default comparison."""
        return self.arr[i][1] < self.arr[j][1]

    def _get_valid_parent(self, i: int) -> int:
        """Returns the index which should hold the parent value among i and its children."""
        left = self._left(i)
        right = self._right(i)
        valid_parent = i

        if left is not None and not self._cmp(left, valid_parent):
            valid_parent = left
        if right is not None and not self._cmp(right, valid_parent):
            valid_parent = right

        return valid_parent

    def _heapify_up(self, index: int) -> None:
        """Fixes the heap in the upward direction from the given index."""
        parent = self._parent(index)
        while parent is not None and not self._cmp(index, parent):
            self._swap(index, parent)
            index, parent = parent, self._parent(parent)

    def _heapify_down(self, index: int) -> None:
        """Fixes the heap in the downward direction from the given index."""
        valid_parent = self._get_valid_parent(index)
        while valid_parent != index:
            self._swap(index, valid_parent)
            index, valid_parent = valid_parent, self._get_valid_parent(index)

    def update_item(self, item: int, item_value: int) -> None:
        """Updates the given item's value in the heap if present."""
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        self.arr[index] = [item, self.key(item_value)]
        # Make sure heap is right in both up and down direction.
        # Ideally only one of them will make any change.
        self._heapify_up(index)
        self._heapify_down(index)

    def delete_item(self, item: int) -> None:
        """Deletes the given item from the heap if present."""
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        del self.pos_map[item]
        self.arr[index] = self.arr[self.size - 1]
        self.pos_map[self.arr[self.size - 1][0]] = index
        self.size -= 1
        # Make sure heap is right in both up and down direction. Ideally only one
        # of them will make any change- so no performance loss in calling both.
        if self.size > index:
            self._heapify_up(index)
            self._heapify_down(index)

    def insert_item(self, item: int, item_value: int) -> None:
        """Inserts the given item with the given value into the heap."""
        arr_len = len(self.arr)
        if arr_len == self.size:
            self.arr.append([item, self.key(item_value)])
        else:
            self.arr[self.size] = [item, self.key(item_value)]
        self.pos_map[item] = self.size
        self.size += 1
        self._heapify_up(self.size - 1)

    def get_top(self) -> tuple | None:
        """Returns the top item from the heap if present."""
        return self.arr[0] if self.size else None

    def extract_top(self) -> tuple | None:
        """Returns the top item from the heap and removes it if present."""
        top_item_tuple = self.get_top()
        if top_item_tuple:
            self.delete_item(top_item_tuple[0])
        return top_item_tuple


def test_heap() -> None:
    pass
if __name__ == "__main__":
import doctest
doctest.testmod()
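# A minimal usage sketch for the generic Heap above. With the default identity
# key it behaves as a max-heap; pass key=lambda x: -x to get a min-heap:
#
#   h = Heap()
#   h.insert_item(5, 34)
#   h.insert_item(6, 31)
#   h.insert_item(7, 37)
#   h.get_top()      # [7, 37]
#   h.extract_top()  # [7, 37]
#   h.get_top()      # [5, 34]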
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel
from ...schedulers import KarrasVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class KarrasVePipeline(DiffusionPipeline):
    """Pipeline for unconditional image generation with the stochastic sampler of Karras et al."""

    unet: UNet2DModel
    scheduler: KarrasVeScheduler

    def __init__(self, unet: UNet2DModel, scheduler: KarrasVeScheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 50,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)

        model = self.unet

        # sample x_0 ~ N(0, sigma_0^2 * I)
        sample = randn_tensor(shape, generator=generator, device=self.device) * self.scheduler.init_noise_sigma

        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # here sigma_t == t_i from the paper
            sigma = self.scheduler.schedule[t]
            sigma_prev = self.scheduler.schedule[t - 1] if t > 0 else 0

            # 1. Select temporarily increased noise level sigma_hat
            # 2. Add new noise to move from sample_i to sample_hat
            sample_hat, sigma_hat = self.scheduler.add_noise_to_input(sample, sigma, generator=generator)

            # 3. Predict the noise residual given the noise magnitude `sigma_hat`
            # The model inputs and output are adjusted by following eq. (213) in [1].
            model_output = (sigma_hat / 2) * model((sample_hat + 1) / 2, sigma_hat / 2).sample

            # 4. Evaluate dx/dt at sigma_hat
            # 5. Take Euler step from sigma to sigma_prev
            step_output = self.scheduler.step(model_output, sigma_hat, sigma_prev, sample_hat)

            if sigma_prev != 0:
                # 6. Apply 2nd order correction
                # The model inputs and output are adjusted by following eq. (213) in [1].
                model_output = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2, sigma_prev / 2).sample
                step_output = self.scheduler.step_correct(
                    model_output,
                    sigma_hat,
                    sigma_prev,
                    sample_hat,
                    step_output.prev_sample,
                    step_output["derivative"],
                )
            sample = step_output.prev_sample

        sample = (sample / 2 + 0.5).clamp(0, 1)
        image = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
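# A minimal usage sketch for the pipeline above (assuming a KarrasVe-compatible
# checkpoint such as "google/ncsnpp-celebahq-256" is available on the Hub):
#
#   pipe = KarrasVePipeline.from_pretrained("google/ncsnpp-celebahq-256")
#   image = pipe(num_inference_steps=50).images[0]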
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
if is_sentencepiece_available():
    from ..t5.tokenization_t5 import T5Tokenizer
else:
    from ...utils.dummy_sentencepiece_objects import T5Tokenizer

MT5Tokenizer = T5Tokenizer

if is_tokenizers_available():
    from ..t5.tokenization_t5_fast import T5TokenizerFast
else:
    from ...utils.dummy_tokenizers_objects import T5TokenizerFast

MT5TokenizerFast = T5TokenizerFast

_import_structure = {"configuration_mt5": ["MT5Config", "MT5OnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mt5"] = [
        "MT5EncoderModel",
        "MT5ForConditionalGeneration",
        "MT5ForQuestionAnswering",
        "MT5Model",
        "MT5PreTrainedModel",
        "MT5Stack",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mt5"] = ["TFMT5EncoderModel", "TFMT5ForConditionalGeneration", "TFMT5Model"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_mt5"] = ["FlaxMT5EncoderModel", "FlaxMT5ForConditionalGeneration", "FlaxMT5Model"]
if TYPE_CHECKING:
    from .configuration_mt5 import MT5Config, MT5OnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_mt5 import (
            MT5EncoderModel,
            MT5ForConditionalGeneration,
            MT5ForQuestionAnswering,
            MT5Model,
            MT5PreTrainedModel,
            MT5Stack,
        )
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_tf_mt5 import TFMT5EncoderModel, TFMT5ForConditionalGeneration, TFMT5Model
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_mt5 import FlaxMT5EncoderModel, FlaxMT5ForConditionalGeneration, FlaxMT5Model
else:
import sys
    sys.modules[__name__] = _LazyModule(
        __name__,
        globals()["__file__"],
        _import_structure,
        extra_objects={"MT5Tokenizer": MT5Tokenizer, "MT5TokenizerFast": MT5TokenizerFast},
        module_spec=__spec__,
    )
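# With the lazy structure above, end users import the public names directly:
#
#   from transformers import MT5Config, MT5ForConditionalGeneration
#
# torch (or TF/Flax) is only imported when the corresponding class is resolved.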
'''simple docstring'''
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTModelTester:
"""simple docstring"""
def __init__( self , snake_case , snake_case=1_3 , snake_case=3_0 , snake_case=2 , snake_case=3 , snake_case=True , snake_case=True , snake_case=3_2 , snake_case=5 , snake_case=4 , snake_case=3_7 , snake_case="gelu" , snake_case=0.1 , snake_case=0.1 , snake_case=1_0 , snake_case=0.02 , snake_case=None , snake_case=2 , ):
'''simple docstring'''
UpperCAmelCase : str = parent
UpperCAmelCase : Optional[Any] = batch_size
UpperCAmelCase : str = image_size
UpperCAmelCase : Tuple = patch_size
UpperCAmelCase : int = num_channels
UpperCAmelCase : List[str] = is_training
UpperCAmelCase : str = use_labels
UpperCAmelCase : int = hidden_size
UpperCAmelCase : List[Any] = num_hidden_layers
UpperCAmelCase : Union[str, Any] = num_attention_heads
UpperCAmelCase : Any = intermediate_size
UpperCAmelCase : str = hidden_act
UpperCAmelCase : str = hidden_dropout_prob
UpperCAmelCase : List[str] = attention_probs_dropout_prob
UpperCAmelCase : int = type_sequence_label_size
UpperCAmelCase : Dict = initializer_range
UpperCAmelCase : Dict = scope
UpperCAmelCase : Dict = encoder_stride
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
UpperCAmelCase : List[Any] = (image_size // patch_size) ** 2
UpperCAmelCase : Dict = num_patches + 1
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase : str = None
if self.use_labels:
UpperCAmelCase : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase : Optional[Any] = self.get_config()
return config, pixel_values, labels
def A_ ( self ):
'''simple docstring'''
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCAmelCase__ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def A_ ( self , snake_case , snake_case , snake_case ):
'''simple docstring'''
UpperCAmelCase : Union[str, Any] = ViTModel(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
UpperCAmelCase : Optional[int] = model(lowerCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A_ ( self , snake_case , snake_case , snake_case ):
'''simple docstring'''
UpperCAmelCase : Optional[int] = ViTForMaskedImageModeling(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
UpperCAmelCase : str = model(lowerCAmelCase__ )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
UpperCAmelCase : Dict = 1
UpperCAmelCase : List[str] = ViTForMaskedImageModeling(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
UpperCAmelCase : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase : str = model(lowerCAmelCase__ )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def A_ ( self , snake_case , snake_case , snake_case ):
'''simple docstring'''
UpperCAmelCase : Tuple = self.type_sequence_label_size
UpperCAmelCase : List[str] = ViTForImageClassification(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
UpperCAmelCase : Any = model(lowerCAmelCase__ , labels=lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
UpperCAmelCase : Union[str, Any] = 1
UpperCAmelCase : List[str] = ViTForImageClassification(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
UpperCAmelCase : Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase : Dict = model(lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def A_ ( self ):
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class ViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ViTModel,
            ViTForImageClassification,
            ViTForMaskedImageModeling,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ViTModel, "image-classification": ViTForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = True

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = ViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
@unittest.skip(reason="ViT does not use inputs_embeds" )
def A_ ( self ):
'''simple docstring'''
pass
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase : Dict = model_class(lowerCAmelCase__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCAmelCase : List[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCAmelCase__ , nn.Linear ) )
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase : List[Any] = model_class(lowerCAmelCase__ )
UpperCAmelCase : int = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase : Optional[Any] = [*signature.parameters.keys()]
UpperCAmelCase : Optional[int] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , lowerCAmelCase__ )
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase__ )
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*lowerCAmelCase__ )
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase__ )
@slow
def A_ ( self ):
'''simple docstring'''
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase : Union[str, Any] = ViTModel.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ViTModelIntegrationTest(unittest.TestCase):
"""simple docstring"""
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224") if is_vision_available() else None
@slow
    def test_inference_image_classification_head(self):
UpperCAmelCase : int = ViTForImageClassification.from_pretrained("google/vit-base-patch16-224" ).to(lowerCAmelCase__ )
UpperCAmelCase : Optional[Any] = self.default_image_processor
UpperCAmelCase : str = prepare_img()
UpperCAmelCase : Optional[Any] = image_processor(images=lowerCAmelCase__ , return_tensors="pt" ).to(lowerCAmelCase__ )
# forward pass
with torch.no_grad():
UpperCAmelCase : Optional[int] = model(**lowerCAmelCase__ )
# verify the logits
UpperCAmelCase : Any = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase__ )
UpperCAmelCase : List[Any] = torch.tensor([-0.2744, 0.8215, -0.0836] ).to(lowerCAmelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase__ , atol=1e-4 ) )
@slow
    def test_inference_interpolate_pos_encoding(self):
UpperCAmelCase : str = ViTModel.from_pretrained("facebook/dino-vits8" ).to(lowerCAmelCase__ )
UpperCAmelCase : List[Any] = ViTImageProcessor.from_pretrained("facebook/dino-vits8" , size=4_8_0 )
UpperCAmelCase : List[Any] = prepare_img()
UpperCAmelCase : List[Any] = image_processor(images=lowerCAmelCase__ , return_tensors="pt" )
UpperCAmelCase : int = inputs.pixel_values.to(lowerCAmelCase__ )
# forward pass
with torch.no_grad():
UpperCAmelCase : Optional[int] = model(lowerCAmelCase__ , interpolate_pos_encoding=lowerCAmelCase__ )
# verify the logits
UpperCAmelCase : Tuple = torch.Size((1, 3_6_0_1, 3_8_4) )
self.assertEqual(outputs.last_hidden_state.shape , lowerCAmelCase__ )
UpperCAmelCase : Union[str, Any] = torch.tensor(
[[4.2340, 4.3906, -6.6692], [4.5463, 1.8928, -6.7257], [4.4429, 0.8496, -5.8585]] ).to(lowerCAmelCase__ )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , lowerCAmelCase__ , atol=1e-4 ) )
@slow
@require_accelerate
@require_torch_gpu
    def test_inference_fp16(self):
UpperCAmelCase : Dict = ViTModel.from_pretrained("facebook/dino-vits8" , torch_dtype=torch.floataa , device_map="auto" )
UpperCAmelCase : int = self.default_image_processor
UpperCAmelCase : Union[str, Any] = prepare_img()
UpperCAmelCase : Dict = image_processor(images=lowerCAmelCase__ , return_tensors="pt" )
UpperCAmelCase : str = inputs.pixel_values.to(lowerCAmelCase__ )
# forward pass to make sure inference works in fp16
with torch.no_grad():
UpperCAmelCase : str = model(lowerCAmelCase__ )
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class ShapEPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ShapEPipeline
    params = ["prompt"]
    batch_params = ["prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    test_gpu_offload = False
@property
def _SCREAMING_SNAKE_CASE ( self : List[str]):
return 32
@property
def _SCREAMING_SNAKE_CASE ( self : List[str]):
return 32
@property
def _SCREAMING_SNAKE_CASE ( self : int):
return self.time_input_dim * 4
@property
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
return 8
@property
def _SCREAMING_SNAKE_CASE ( self : int):
SCREAMING_SNAKE_CASE_: str = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
return tokenizer
@property
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
torch.manual_seed(0)
SCREAMING_SNAKE_CASE_: Optional[int] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(lowerCAmelCase__)
@property
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
torch.manual_seed(0)
SCREAMING_SNAKE_CASE_: Tuple = {
"num_attention_heads": 2,
"attention_head_dim": 16,
"embedding_dim": self.time_input_dim,
"num_embeddings": 32,
"embedding_proj_dim": self.text_embedder_hidden_size,
"time_embed_dim": self.time_embed_dim,
"num_layers": 1,
"clip_embed_dim": self.time_input_dim * 2,
"additional_embeddings": 0,
"time_embed_act_fn": "gelu",
"norm_in_type": "layer",
"encoder_hid_proj_type": None,
"added_emb_type": None,
}
SCREAMING_SNAKE_CASE_: Any = PriorTransformer(**lowerCAmelCase__)
return model
@property
def _SCREAMING_SNAKE_CASE ( self : Dict):
torch.manual_seed(0)
SCREAMING_SNAKE_CASE_: Union[str, Any] = {
"param_shapes": (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
"d_latent": self.time_input_dim,
"d_hidden": self.renderer_dim,
"n_output": 12,
"background": (
0.1,
0.1,
0.1,
),
}
SCREAMING_SNAKE_CASE_: Optional[int] = ShapERenderer(**lowerCAmelCase__)
return model
def _SCREAMING_SNAKE_CASE ( self : List[str]):
SCREAMING_SNAKE_CASE_: Dict = self.dummy_prior
SCREAMING_SNAKE_CASE_: Optional[Any] = self.dummy_text_encoder
SCREAMING_SNAKE_CASE_: Union[str, Any] = self.dummy_tokenizer
SCREAMING_SNAKE_CASE_: List[str] = self.dummy_renderer
SCREAMING_SNAKE_CASE_: Any = HeunDiscreteScheduler(
beta_schedule="exp" , num_train_timesteps=1024 , prediction_type="sample" , use_karras_sigmas=lowerCAmelCase__ , clip_sample=lowerCAmelCase__ , clip_sample_range=1.0 , )
SCREAMING_SNAKE_CASE_: Optional[int] = {
"prior": prior,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"renderer": renderer,
"scheduler": scheduler,
}
return components
def _SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : List[Any]=0):
if str(lowerCAmelCase__).startswith("mps"):
SCREAMING_SNAKE_CASE_: Optional[Any] = torch.manual_seed(lowerCAmelCase__)
else:
SCREAMING_SNAKE_CASE_: Any = torch.Generator(device=lowerCAmelCase__).manual_seed(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Union[str, Any] = {
"prompt": "horse",
"generator": generator,
"num_inference_steps": 1,
"frame_size": 32,
"output_type": "np",
}
return inputs
    def test_shap_e(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (20, 32, 32, 3)
        expected_slice = np.array(
            [
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
            ]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_consistent(self):
        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        self._test_inference_batch_single_identical(
            batch_size=2,
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
        )

    def test_num_images_per_prompt(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        batch_size = 1
        num_images_per_prompt = 2
        inputs = self.get_dummy_inputs(torch_device)

        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]

        images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]
        assert images.shape[0] == batch_size * num_images_per_prompt


@slow
@require_torch_gpu
class ShapEPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_shap_e(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/shap_e/test_shap_e_np_out.npy"
        )
        pipe = ShapEPipeline.from_pretrained("openai/shap-e")
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=torch_device).manual_seed(0)

        images = pipe(
            "a shark",
            generator=generator,
            guidance_scale=15.0,
            num_inference_steps=64,
            frame_size=64,
            output_type="np",
        ).images[0]

        assert images.shape == (20, 64, 64, 3)
        assert_mean_pixel_difference(images, expected_image)
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    EulerAncestralDiscreteScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
    StableDiffusionPanoramaPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
@skip_mps
class StableDiffusionPanoramaPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionPanoramaPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=1,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler()
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        generator = torch.manual_seed(seed)
        inputs = {
            "prompt": "a photo of the dolomites",
            "generator": generator,
            # Setting height and width to None to prevent OOMs on CPU.
            "height": None,
            "width": None,
            "num_inference_steps": 1,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_panorama_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.6186, 0.5374, 0.4915, 0.4135, 0.4114, 0.4563, 0.5128, 0.4977, 0.4757])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_consistent(self):
        super().test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(batch_size=2, expected_max_diff=3.25e-3)

    def test_stable_diffusion_panorama_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        negative_prompt = "french fries"
        output = sd_pipe(**inputs, negative_prompt=negative_prompt)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_panorama_views_batch(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = sd_pipe(**inputs, view_batch_size=2)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_panorama_euler(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = EulerAncestralDiscreteScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear"
        )
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.4024, 0.6510, 0.4901, 0.5378, 0.5813, 0.5622, 0.4795, 0.4467, 0.4952])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_panorama_pndm(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", skip_prk_steps=True
        )
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.6391, 0.6291, 0.4861, 0.5134, 0.5552, 0.4578, 0.5032, 0.5023, 0.4539])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class StableDiffusionPanoramaSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, seed=0):
        generator = torch.manual_seed(seed)
        inputs = {
            "prompt": "a photo of the dolomites",
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_panorama_ddim(self):
        model_ckpt = "stabilityai/stable-diffusion-2-base"
        scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler")
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 2048, 3)

        expected_slice = np.array(
            [
                0.36968392,
                0.27025372,
                0.32446766,
                0.28379387,
                0.36363274,
                0.30733347,
                0.27100027,
                0.27054125,
                0.25536096,
            ]
        )
        assert np.abs(expected_slice - image_slice).max() < 1e-2

    def test_stable_diffusion_panorama_k_lms(self):
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-base", safety_checker=None
        )
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 2048, 3)

        expected_slice = np.array(
            [
                [
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                ]
            ]
        )
        assert np.abs(expected_slice - image_slice).max() < 1e-3

    def test_stable_diffusion_panorama_intermediate_state(self):
        number_of_steps = 0

        def callback_fn(step: int, timestep: int, latents: torch.FloatTensor) -> None:
            callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 1:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 256)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [
                        0.18681869,
                        0.33907816,
                        0.5361276,
                        0.14432865,
                        -0.02856611,
                        -0.73941123,
                        0.23397987,
                        0.47322682,
                        -0.37823164,
                    ]
                )
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2
            elif step == 2:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 256)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [
                        0.18539645,
                        0.33987248,
                        0.5378559,
                        0.14437142,
                        -0.02455261,
                        -0.7338317,
                        0.23990755,
                        0.47356272,
                        -0.3786505,
                    ]
                )
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2

        callback_fn.has_been_called = False

        model_ckpt = "stabilityai/stable-diffusion-2-base"
        scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler")
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        pipe(**inputs, callback=callback_fn, callback_steps=1)
        assert callback_fn.has_been_called
        assert number_of_steps == 3

    def test_stable_diffusion_panorama_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        model_ckpt = "stabilityai/stable-diffusion-2-base"
        scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler")
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        inputs = self.get_inputs()
        _ = pipe(**inputs)

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 5.5 GB is allocated
        assert mem_bytes < 5.5 * 10**9
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
    if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
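# Minimal consumer-side sketch (not part of this package's sources): once the
# guarded imports above succeed, the pipelines are used like any other
# diffusers pipeline. The checkpoint id below is the public UnCLIP model and
# is shown purely for illustration.
#
#   from diffusers import UnCLIPPipeline
#
#   pipe = UnCLIPPipeline.from_pretrained("kakaobrain/karlo-v1-alpha")
#   image = pipe("a photo of an astronaut").images[0]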
import unittest
import numpy as np
from transformers import MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING, TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
from transformers.pipelines import AudioClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_torchaudio,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class AudioClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        audio_classifier = AudioClassificationPipeline(model=model, feature_extractor=processor)

        # test with a raw waveform
        audio2 = np.zeros((34000,))
        audio = np.zeros((14000,))
        return audio_classifier, [audio2, audio]

    def run_pipeline_test(self, audio_classifier, examples):
        audio2, audio = examples
        output = audio_classifier(audio)
        # by default a model is initialized with num_labels=2
        self.assertEqual(
            output,
            [
                {"score": ANY(float), "label": ANY(str)},
                {"score": ANY(float), "label": ANY(str)},
            ],
        )
        output = audio_classifier(audio, top_k=1)
        self.assertEqual(
            output,
            [
                {"score": ANY(float), "label": ANY(str)},
            ],
        )

        self.run_torchaudio(audio_classifier)

    @require_torchaudio
    def run_torchaudio(self, audio_classifier):
        import datasets

        # test with a local file
        dataset = datasets.load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        audio = dataset[0]["audio"]["array"]
        output = audio_classifier(audio)
        self.assertEqual(
            output,
            [
                {"score": ANY(float), "label": ANY(str)},
                {"score": ANY(float), "label": ANY(str)},
            ],
        )

    @require_torch
    def test_small_model_pt(self):
        model = "anton-l/wav2vec2-random-tiny-classifier"
        audio_classifier = pipeline("audio-classification", model=model)

        audio = np.ones((8000,))
        output = audio_classifier(audio, top_k=4)

        EXPECTED_OUTPUT = [
            {"score": 0.0842, "label": "no"},
            {"score": 0.0838, "label": "up"},
            {"score": 0.0837, "label": "go"},
            {"score": 0.0834, "label": "right"},
        ]
        EXPECTED_OUTPUT_PT_2 = [
            {"score": 0.0845, "label": "stop"},
            {"score": 0.0844, "label": "on"},
            {"score": 0.0841, "label": "right"},
            {"score": 0.0834, "label": "left"},
        ]
        self.assertIn(nested_simplify(output, decimals=4), [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2])

        audio_dict = {"array": np.ones((8000,)), "sampling_rate": audio_classifier.feature_extractor.sampling_rate}
        output = audio_classifier(audio_dict, top_k=4)
        self.assertIn(nested_simplify(output, decimals=4), [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2])

    @require_torch
    @slow
    def test_large_model_pt(self):
        import datasets

        model = "superb/wav2vec2-base-superb-ks"
        audio_classifier = pipeline("audio-classification", model=model)
        dataset = datasets.load_dataset("anton-l/superb_dummy", "ks", split="test")

        audio = np.array(dataset[3]["speech"], dtype=np.float32)
        output = audio_classifier(audio, top_k=4)
        self.assertEqual(
            nested_simplify(output, decimals=3),
            [
                {"score": 0.981, "label": "go"},
                {"score": 0.007, "label": "up"},
                {"score": 0.006, "label": "_unknown_"},
                {"score": 0.001, "label": "down"},
            ],
        )

    @require_tf
    @unittest.skip("Audio classification is not implemented for TF")
    def test_small_model_tf(self):
        pass
class Node:
    def __init__(self, name, val):
        self.name = name
        self.val = val

    def __str__(self):
        return f"{self.__class__.__name__}({self.name}, {self.val})"

    def __lt__(self, other):
        return self.val < other.val


class MinHeap:
    """
    Min-heap keyed by ``Node.val``. ``idx_of_element`` maps each node to its
    position in ``self.heap`` so ``decrease_key`` can locate it in O(1).
    """

    def __init__(self, array):
        self.idx_of_element = {}
        self.heap_dict = {}
        self.heap = self.build_heap(array)

    def __getitem__(self, key):
        return self.get_value(key)

    def get_parent_idx(self, idx):
        return (idx - 1) // 2

    def get_left_child_idx(self, idx):
        return idx * 2 + 1

    def get_right_child_idx(self, idx):
        return idx * 2 + 2

    def get_value(self, key):
        return self.heap_dict[key]

    def build_heap(self, array):
        last_idx = len(array) - 1
        start_from = self.get_parent_idx(last_idx)
        for idx, i in enumerate(array):
            self.idx_of_element[i] = idx
            self.heap_dict[i.name] = i.val
        for i in range(start_from, -1, -1):
            self.sift_down(i, array)
        return array

    # this is the min-heapify step
    def sift_down(self, idx, array):
        while True:
            l = self.get_left_child_idx(idx)  # noqa: E741
            r = self.get_right_child_idx(idx)

            smallest = idx
            if l < len(array) and array[l] < array[idx]:
                smallest = l
            if r < len(array) and array[r] < array[smallest]:
                smallest = r

            if smallest != idx:
                array[idx], array[smallest] = array[smallest], array[idx]
                (
                    self.idx_of_element[array[idx]],
                    self.idx_of_element[array[smallest]],
                ) = (
                    self.idx_of_element[array[smallest]],
                    self.idx_of_element[array[idx]],
                )
                idx = smallest
            else:
                break

    def sift_up(self, idx):
        p = self.get_parent_idx(idx)
        while p >= 0 and self.heap[p] > self.heap[idx]:
            self.heap[p], self.heap[idx] = self.heap[idx], self.heap[p]
            self.idx_of_element[self.heap[p]], self.idx_of_element[self.heap[idx]] = (
                self.idx_of_element[self.heap[idx]],
                self.idx_of_element[self.heap[p]],
            )
            idx = p
            p = self.get_parent_idx(idx)

    def peek(self):
        return self.heap[0]

    def remove(self):
        self.heap[0], self.heap[-1] = self.heap[-1], self.heap[0]
        self.idx_of_element[self.heap[0]], self.idx_of_element[self.heap[-1]] = (
            self.idx_of_element[self.heap[-1]],
            self.idx_of_element[self.heap[0]],
        )

        x = self.heap.pop()
        del self.idx_of_element[x]
        self.sift_down(0, self.heap)
        return x

    def insert(self, node):
        self.heap.append(node)
        self.idx_of_element[node] = len(self.heap) - 1
        self.heap_dict[node.name] = node.val
        self.sift_up(len(self.heap) - 1)

    def is_empty(self):
        return len(self.heap) == 0

    def decrease_key(self, node, new_value):
        assert (
            self.heap[self.idx_of_element[node]].val > new_value
        ), "newValue must be less than current value"
        node.val = new_value
        self.heap_dict[node.name] = new_value
        self.sift_up(self.idx_of_element[node])
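# Informal cost summary for the structure above (standard binary-heap bounds):
# build_heap is O(n); peek is O(1); insert, remove, and decrease_key are
# O(log n), since each sift walks at most one root-to-leaf path while
# idx_of_element locates any node in O(1).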
# Example usage
r = Node("R", -1)
b = Node("B", 6)
a = Node("A", 3)
x = Node("X", 1)
e = Node("E", 4)

# Use one of these two ways to generate Min-Heap

# Generating Min-Heap from array
my_min_heap = MinHeap([r, b, a, x, e])

# Generating Min-Heap by Insert method
# myMinHeap.insert(a)
# myMinHeap.insert(b)
# myMinHeap.insert(x)
# myMinHeap.insert(r)
# myMinHeap.insert(e)

# Before
print("Min Heap - before decrease key")
for i in my_min_heap.heap:
    print(i)

print("Min Heap - After decrease key of node [B -> -17]")
my_min_heap.decrease_key(b, -17)

# After
for i in my_min_heap.heap:
    print(i)
if __name__ == "__main__":
import doctest
doctest.testmod()
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class TestActivations(unittest.TestCase):
    def test_gelu_versions(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        self.assertTrue(torch.allclose(gelu_python(x), torch_builtin(x)))
        self.assertFalse(torch.allclose(gelu_python(x), gelu_new(x)))

    def test_gelu_10(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        gelu10 = get_activation("gelu_10")

        y_gelu = torch_builtin(x)
        y_gelu_10 = gelu10(x)

        clipped_mask = torch.where(y_gelu_10 < 10.0, 1, 0)

        self.assertTrue(torch.max(y_gelu_10).item() == 10.0)
        self.assertTrue(torch.allclose(y_gelu * clipped_mask, y_gelu_10 * clipped_mask))

    def test_get_activation(self):
        get_activation("gelu")
        get_activation("gelu_10")
        get_activation("gelu_fast")
        get_activation("gelu_new")
        get_activation("gelu_python")
        get_activation("gelu_pytorch_tanh")
        get_activation("linear")
        get_activation("mish")
        get_activation("quick_gelu")
        get_activation("relu")
        get_activation("sigmoid")
        get_activation("silu")
        get_activation("swish")
        get_activation("tanh")
        with self.assertRaises(KeyError):
            get_activation("bogus")
        with self.assertRaises(KeyError):
            get_activation(None)

    def test_activations_are_distinct_objects(self):
        act1 = get_activation("gelu")
        act1.a = 1

        act2 = get_activation("gelu")
        self.assertEqual(act1.a, 1)

        # each `get_activation` call must return a fresh object, so an
        # attribute set on the first instance must not leak to the second
        with self.assertRaises(AttributeError):
            _ = act2.a
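# Note on the "gelu_10" activation exercised above: it is GELU with outputs
# clipped to the range [-10, 10] (hence the name), so for example
# get_activation("gelu_10")(torch.tensor([100.0])) yields tensor([10.]) while
# the unclipped GELU would return roughly 100.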
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
    os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = "0.12"  # assumed parallelism: 8
if is_torch_available():
import torch
def ids_tensor(shape, vocab_size, rng=None):
    """Creates a random int32 tensor of the given shape within the vocab size."""
    if rng is None:
        rng = random.Random()

    total_dims = 1
    for dim in shape:
        total_dims *= dim

    values = []
    for _ in range(total_dims):
        values.append(rng.randint(0, vocab_size - 1))

    output = np.array(values, dtype=jnp.int32).reshape(shape)
    return output


def random_attention_mask(shape, rng=None):
    attn_mask = ids_tensor(shape, vocab_size=2, rng=rng)
    # make sure that at least one token is attended to for each batch
    attn_mask[:, -1] = 1
    return attn_mask
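# Shape sanity check for the helpers above (illustrative values, not a test):
# ids_tensor((2, 5), vocab_size=99) returns a (2, 5) int32 array with entries
# in [0, 98]; random_attention_mask((2, 5)) returns a (2, 5) 0/1 mask whose
# last column is all ones, so every row attends to at least one token.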
@require_flax
class FlaxGenerationTesterMixin:
    model_tester = None
    all_generative_model_classes = ()

    def _get_input_ids_and_config(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()

        # cut to half length & take max batch_size 3
        max_batch_size = 2
        sequence_length = inputs["input_ids"].shape[-1] // 2
        input_ids = inputs["input_ids"][:max_batch_size, :sequence_length]

        attention_mask = jnp.ones_like(input_ids)
        attention_mask = attention_mask[:max_batch_size, :sequence_length]

        # generate max 5 tokens
        max_length = input_ids.shape[-1] + 5
        if config.eos_token_id is not None and config.pad_token_id is None:
            # hack to allow generate for models such as GPT2 as is done in `generate()`
            config.pad_token_id = config.eos_token_id
        return config, input_ids, attention_mask, max_length

    @is_pt_flax_cross_test
    def test_greedy_generate_pt_fx(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.decoder_start_token_id = 0

        for model_class in self.all_generative_model_classes:
            flax_model = model_class(config)

            pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
            pt_model_class = getattr(transformers, pt_model_class_name)
            pt_model = pt_model_class(config).eval()
            pt_model = load_flax_weights_in_pytorch_model(pt_model, flax_model.params)

            flax_generation_outputs = flax_model.generate(input_ids).sequences
            pt_generation_outputs = pt_model.generate(torch.tensor(input_ids, dtype=torch.long))

            if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
                flax_generation_outputs = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]

            self.assertListEqual(pt_generation_outputs.numpy().tolist(), flax_generation_outputs.tolist())

    def test_greedy_generate(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_sample_generate(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = True
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_beam_search_generate(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.num_beams = 2

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_beam_search_generate_num_return_sequences(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.num_beams = 2
        config.num_return_sequences = 2

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[0], input_ids.shape[0] * config.num_return_sequences)

    def test_sample_generate_logits_warper(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = True
        config.max_length = max_length
        config.temperature = 0.8
        config.top_k = 10
        config.top_p = 0.3
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_greedy_generate_logits_warper(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.max_length = max_length
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_beam_search_generate_logits_warper(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.max_length = max_length
        config.num_beams = 2
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_greedy_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()

        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)

        config.do_sample = False
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_sample_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()

        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)

        config.do_sample = True
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_beam_search_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()

        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)

        config.num_beams = 2
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
@require_flax
class FlaxGenerationIntegrationTests(unittest.TestCase):
    def test_validate_generation_inputs(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-bert")
        model = FlaxAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-bert-flax-only")

        encoder_input_str = "Hello world"
        input_ids = tokenizer(encoder_input_str, return_tensors="np").input_ids

        # typos are quickly detected (the correct argument is `do_sample`)
        with self.assertRaisesRegex(ValueError, "do_samples"):
            model.generate(input_ids, do_samples=True)

        # arbitrary arguments that will not be used anywhere are also not accepted
        with self.assertRaisesRegex(ValueError, "foo"):
            fake_model_kwargs = {"foo": "bar"}
            model.generate(input_ids, **fake_model_kwargs)
from __future__ import annotations
__author__ = "Muhammad Umer Farooq"
__license__ = "MIT"
__version__ = "1.0.0"
__maintainer__ = "Muhammad Umer Farooq"
__email__ = "[email protected]"
__status__ = "Alpha"
import re
from html.parser import HTMLParser
from urllib import parse
import requests
class Parser(HTMLParser):
    def __init__(self, domain: str) -> None:
        super().__init__()
        self.urls: list[str] = []
        self.domain = domain

    def handle_starttag(self, tag: str, attrs: list[tuple[str, str | None]]) -> None:
        # Only parse the 'anchor' tag.
        if tag == "a":
            # Check the list of defined attributes.
            for name, value in attrs:
                # If href is defined, and not empty nor # print it.
                if name == "href" and value != "#" and value != "":
                    # If not already in urls.
                    if value not in self.urls:
                        url = parse.urljoin(self.domain, value)
                        self.urls.append(url)


# Get main domain name (example.com)
def get_domain_name(url: str) -> str:
    return ".".join(get_sub_domain_name(url).split(".")[-2:])


# Get sub domain name (sub.example.com)
def get_sub_domain_name(url: str) -> str:
    return parse.urlparse(url).netloc


def emails_from_url(url: str = "https://github.com") -> list[str]:
    # Get the base domain from the url
    domain = get_domain_name(url)

    # Initialize the parser
    parser = Parser(domain)

    try:
        # Open URL
        r = requests.get(url)

        # pass the raw HTML to the parser to get links
        parser.feed(r.text)

        # Get links and loop through
        valid_emails = set()
        for link in parser.urls:
            # open URL.
            # read = requests.get(link)
            try:
                read = requests.get(link)
                # Get the valid email.
                emails = re.findall("[a-zA-Z0-9]+@" + domain, read.text)
                # If not in list then append it.
                for email in emails:
                    valid_emails.add(email)
            except ValueError:
                pass
    except ValueError:
        raise SystemExit(1)

    # Finally return a sorted list of email addresses with no duplicates.
    return sorted(valid_emails)


if __name__ == "__main__":
    emails = emails_from_url("https://github.com")
    print(f"{len(emails)} emails found:")
    print("\n".join(sorted(emails)))
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
KEYS_TO_MODIFY_MAPPING = {
    "text_branch": "text_model",
    "audio_branch": "audio_model.audio_encoder",
    "attn": "attention.self",
    "self.proj": "output.dense",
    "attention.self_mask": "attn_mask",
    "mlp.fc1": "intermediate.dense",
    "mlp.fc2": "output.dense",
    "norm1": "layernorm_before",
    "norm2": "layernorm_after",
    "bn0": "batch_norm",
}
processor = AutoFeatureExtractor.from_pretrained("laion/clap-htsat-unfused", truncation="rand_trunc")
def init_clap(checkpoint_path, enable_fusion=False):
    model, model_cfg = create_model(
        "HTSAT-tiny",
        "roberta",
        checkpoint_path,
        precision="fp32",
        device="cuda:0" if torch.cuda.is_available() else "cpu",
        enable_fusion=enable_fusion,
        fusion_type="aff_2d" if enable_fusion else None,
    )
    return model, model_cfg


def rename_state_dict(state_dict):
    model_state_dict = {}

    sequential_layers_pattern = r".*sequential.(\d+).*"
    text_projection_pattern = r".*_projection.(\d+).*"

    for key, value in state_dict.items():
        # check if any key needs to be modified
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)

        if re.match(sequential_layers_pattern, key):
            # replace sequential layers with list
            sequential_layer = re.match(sequential_layers_pattern, key).group(1)
            key = key.replace(f"sequential.{sequential_layer}.", f"layers.{int(sequential_layer)//3}.linear.")
        elif re.match(text_projection_pattern, key):
            projecton_layer = int(re.match(text_projection_pattern, key).group(1))
            # Because in CLAP they use `nn.Sequential`, the projection layers 0 and 2
            # map onto linear1 and linear2 of the transformers implementation.
            transformers_projection_layer = 1 if projecton_layer == 0 else 2
            key = key.replace(f"_projection.{projecton_layer}.", f"_projection.linear{transformers_projection_layer}.")

        if "audio" in key and "qkv" in key:
            # split the fused qkv weight into query, key and value projections
            mixed_qkv = value
            qkv_dim = mixed_qkv.size(0) // 3

            query_layer = mixed_qkv[:qkv_dim]
            key_layer = mixed_qkv[qkv_dim : qkv_dim * 2]
            value_layer = mixed_qkv[qkv_dim * 2 :]

            model_state_dict[key.replace("qkv", "query")] = query_layer
            model_state_dict[key.replace("qkv", "key")] = key_layer
            model_state_dict[key.replace("qkv", "value")] = value_layer
        else:
            model_state_dict[key] = value

    return model_state_dict


def convert_clap_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path, enable_fusion=False):
    clap_model, clap_model_cfg = init_clap(checkpoint_path, enable_fusion=enable_fusion)

    clap_model.eval()
    state_dict = clap_model.state_dict()
    state_dict = rename_state_dict(state_dict)

    transformers_config = ClapConfig()
    transformers_config.audio_config.enable_fusion = enable_fusion
    model = ClapModel(transformers_config)

    # ignore the spectrogram embedding layer
    model.load_state_dict(state_dict, strict=False)

    model.save_pretrained(pytorch_dump_folder_path)
    transformers_config.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument("""--enable_fusion""", action="""store_true""", help="""Whether to enable fusion or not""")
    args = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
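# Example invocation (the paths below are placeholders for a locally
# downloaded CLAP checkpoint, not files shipped with this script):
#   python convert_clap_original_pytorch_to_hf.py \
#       --checkpoint_path ./HTSAT-tiny-roberta.pt \
#       --pytorch_dump_folder_path ./clap-converted \
#       --enable_fusion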
import functools
def mincost_tickets(days: list[int], costs: list[int]) -> int:
    # Validation
    if not isinstance(days, list) or not all(isinstance(day, int) for day in days):
        raise ValueError("The parameter days should be a list of integers")

    if len(costs) != 3 or not all(isinstance(cost, int) for cost in costs):
        raise ValueError("The parameter costs should be a list of three integers")

    if len(days) == 0:
        return 0

    if min(days) <= 0:
        raise ValueError("All days elements should be greater than 0")

    if max(days) >= 366:
        raise ValueError("All days elements should be less than 366")

    days_set = set(days)

    @functools.cache
    def dynamic_programming(index: int) -> int:
        if index > 365:
            return 0

        if index not in days_set:
            return dynamic_programming(index + 1)

        return min(
            costs[0] + dynamic_programming(index + 1),
            costs[1] + dynamic_programming(index + 7),
            costs[2] + dynamic_programming(index + 30),
        )

    return dynamic_programming(1)
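# Quick worked example (the classic instance of this problem; the numbers are
# standard, only the function name is this file's):
#   mincost_tickets([1, 4, 6, 7, 8, 20], [2, 7, 15]) -> 11
# i.e. a 1-day pass on day 1, a 7-day pass covering days 4-8, and another
# 1-day pass on day 20 (2 + 7 + 2).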
if __name__ == "__main__":
import doctest
doctest.testmod()
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        scope=None,
        encoder_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride

        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = ViTForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = ViTForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = ViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = ViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as ViT does not use input_ids,
    inputs_embeds, attention_mask and seq_length.
    """

    all_model_classes = (
        (
            ViTModel,
            ViTForImageClassification,
            ViTForMaskedImageModeling,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ViTModel, "image-classification": ViTForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = True

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class __lowercase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def _SCREAMING_SNAKE_CASE ( self : int):
return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224") if is_vision_available() else None
@slow
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
SCREAMING_SNAKE_CASE_: int = ViTForImageClassification.from_pretrained("google/vit-base-patch16-224").to(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[Any] = self.default_image_processor
SCREAMING_SNAKE_CASE_: str = prepare_img()
SCREAMING_SNAKE_CASE_: Optional[Any] = image_processor(images=lowerCAmelCase__ , return_tensors="pt").to(lowerCAmelCase__)
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE_: Optional[int] = model(**lowerCAmelCase__)
# verify the logits
SCREAMING_SNAKE_CASE_: Any = torch.Size((1, 1000))
self.assertEqual(outputs.logits.shape , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[Any] = torch.tensor([-0.2744, 0.8215, -0.0836]).to(lowerCAmelCase__)
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase__ , atol=1E-4))
@slow
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
# ViT models have an `interpolate_pos_encoding` argument in their forward method,
# allowing to interpolate the pre-trained position embeddings in order to use
# the model on higher resolutions. The DINO model by Facebook AI leverages this
# to visualize self-attention on higher resolution images.
SCREAMING_SNAKE_CASE_: str = ViTModel.from_pretrained("facebook/dino-vits8").to(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[Any] = ViTImageProcessor.from_pretrained("facebook/dino-vits8" , size=480)
SCREAMING_SNAKE_CASE_: List[Any] = prepare_img()
SCREAMING_SNAKE_CASE_: List[Any] = image_processor(images=lowerCAmelCase__ , return_tensors="pt")
SCREAMING_SNAKE_CASE_: int = inputs.pixel_values.to(lowerCAmelCase__)
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE_: Optional[int] = model(lowerCAmelCase__ , interpolate_pos_encoding=lowerCAmelCase__)
# verify the logits
SCREAMING_SNAKE_CASE_: Tuple = torch.Size((1, 3601, 384))
self.assertEqual(outputs.last_hidden_state.shape , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Union[str, Any] = torch.tensor(
[[4.2340, 4.3906, -6.6692], [4.5463, 1.8928, -6.7257], [4.4429, 0.8496, -5.8585]]).to(lowerCAmelCase__)
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , lowerCAmelCase__ , atol=1E-4))
@slow
@require_accelerate
@require_torch_gpu
def _SCREAMING_SNAKE_CASE ( self : int):
SCREAMING_SNAKE_CASE_: Dict = ViTModel.from_pretrained("facebook/dino-vits8" , torch_dtype=torch.floataa , device_map="auto")
SCREAMING_SNAKE_CASE_: int = self.default_image_processor
SCREAMING_SNAKE_CASE_: Union[str, Any] = prepare_img()
SCREAMING_SNAKE_CASE_: Dict = image_processor(images=lowerCAmelCase__ , return_tensors="pt")
SCREAMING_SNAKE_CASE_: str = inputs.pixel_values.to(lowerCAmelCase__)
# forward pass to make sure inference works in fp16
with torch.no_grad():
SCREAMING_SNAKE_CASE_: str = model(lowerCAmelCase__)
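
# Hedged standalone sketch of `interpolate_pos_encoding`, which the DINO test
# above exercises: the 224px-pretrained position embeddings are resized so the
# backbone accepts larger inputs. Checkpoint and fixture path are taken from
# the tests; everything else is an illustrative assumption.
import torch
from PIL import Image
from transformers import ViTImageProcessor, ViTModel

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
processor = ViTImageProcessor.from_pretrained("facebook/dino-vits8", size=480)
model = ViTModel.from_pretrained("facebook/dino-vits8")
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    outputs = model(inputs.pixel_values, interpolate_pos_encoding=True)
print(outputs.last_hidden_state.shape)  # torch.Size([1, 3601, 384]) per the test above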
"""simple docstring"""
# Algorithm for the pigeonhole sorting
def pigeonhole_sort( a_ : list ) -> None:
    min_val = min(a_ )  # min() finds the minimum value
    max_val = max(a_ )  # max() finds the maximum value
    size = max_val - min_val + 1  # size is difference of max and min values plus one
    # list of pigeonholes of size equal to the variable size
    holes = [0] * size
    # Populate the pigeonholes.
    for x in a_:
        assert isinstance(x , int ), "integers only please"
        holes[x - min_val] += 1
    # Putting the elements back into the array in an order.
    i = 0
    for count in range(size ):
        while holes[count] > 0:
            holes[count] -= 1
            a_[i] = count + min_val
            i += 1
def main( ) -> None:
    nums = [8, 3, 2, 7, 4, 6, 8]
    pigeonhole_sort(nums )
    print('''Sorted order is:''' , ''' '''.join(str(n ) for n in nums ) )
if __name__ == "__main__":
    main()
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase : Any = logging.get_logger(__name__)
lowerCAmelCase : Tuple = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt"""}
lowerCAmelCase : Optional[int] = {
"""vocab_file""": {
"""allenai/longformer-base-4096""": """https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json""",
"""allenai/longformer-large-4096""": (
"""https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json"""
),
"""allenai/longformer-large-4096-finetuned-triviaqa""": (
"""https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json"""
),
"""allenai/longformer-base-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json"""
),
"""allenai/longformer-large-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json"""
),
},
"""merges_file""": {
"""allenai/longformer-base-4096""": """https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt""",
"""allenai/longformer-large-4096""": (
"""https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt"""
),
"""allenai/longformer-large-4096-finetuned-triviaqa""": (
"""https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt"""
),
"""allenai/longformer-base-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt"""
),
"""allenai/longformer-large-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt"""
),
},
}
lowerCAmelCase : Optional[Any] = {
"""allenai/longformer-base-4096""": 4096,
"""allenai/longformer-large-4096""": 4096,
"""allenai/longformer-large-4096-finetuned-triviaqa""": 4096,
"""allenai/longformer-base-4096-extra.pos.embd.only""": 4096,
"""allenai/longformer-large-4096-extra.pos.embd.only""": 4096,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def A_ ( ):
SCREAMING_SNAKE_CASE_: Any = (
list(range(ord("!" ) , ord("~" ) + 1 ) ) + list(range(ord("¡" ) , ord("¬" ) + 1 ) ) + list(range(ord("®" ) , ord("ÿ" ) + 1 ) )
)
SCREAMING_SNAKE_CASE_: Tuple = bs[:]
SCREAMING_SNAKE_CASE_: str = 0
for b in range(2**8 ):
if b not in bs:
bs.append(_UpperCAmelCase )
cs.append(2**8 + n )
n += 1
SCREAMING_SNAKE_CASE_: Optional[int] = [chr(_UpperCAmelCase ) for n in cs]
return dict(zip(_UpperCAmelCase , _UpperCAmelCase ) )
def A_ ( _UpperCAmelCase ):
SCREAMING_SNAKE_CASE_: str = set()
SCREAMING_SNAKE_CASE_: Union[str, Any] = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
SCREAMING_SNAKE_CASE_: Tuple = char
return pairs
class __lowercase ( UpperCAmelCase_ ):
"""simple docstring"""
_UpperCAmelCase : Any = VOCAB_FILES_NAMES
_UpperCAmelCase : Dict = PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase : List[str] = ['''input_ids''', '''attention_mask''']
def __init__( self : str , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : List[Any]="replace" , lowerCAmelCase__ : Optional[Any]="<s>" , lowerCAmelCase__ : int="</s>" , lowerCAmelCase__ : Optional[Any]="</s>" , lowerCAmelCase__ : int="<s>" , lowerCAmelCase__ : Optional[Any]="<unk>" , lowerCAmelCase__ : List[Any]="<pad>" , lowerCAmelCase__ : Any="<mask>" , lowerCAmelCase__ : Union[str, Any]=False , **lowerCAmelCase__ : Tuple , ):
SCREAMING_SNAKE_CASE_: int = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__ , lowerCAmelCase__) else bos_token
SCREAMING_SNAKE_CASE_: str = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__ , lowerCAmelCase__) else eos_token
SCREAMING_SNAKE_CASE_: Optional[int] = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__ , lowerCAmelCase__) else sep_token
SCREAMING_SNAKE_CASE_: Union[str, Any] = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__ , lowerCAmelCase__) else cls_token
SCREAMING_SNAKE_CASE_: int = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__ , lowerCAmelCase__) else unk_token
SCREAMING_SNAKE_CASE_: Any = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__ , lowerCAmelCase__) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
SCREAMING_SNAKE_CASE_: Optional[int] = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__ , lowerCAmelCase__) else mask_token
super().__init__(
errors=lowerCAmelCase__ , bos_token=lowerCAmelCase__ , eos_token=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , sep_token=lowerCAmelCase__ , cls_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , mask_token=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , **lowerCAmelCase__ , )
with open(lowerCAmelCase__ , encoding="utf-8") as vocab_handle:
SCREAMING_SNAKE_CASE_: Tuple = json.load(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[str] = {v: k for k, v in self.encoder.items()}
SCREAMING_SNAKE_CASE_: Optional[Any] = errors # how to handle errors in decoding
SCREAMING_SNAKE_CASE_: List[Any] = bytes_to_unicode()
SCREAMING_SNAKE_CASE_: Optional[Any] = {v: k for k, v in self.byte_encoder.items()}
with open(lowerCAmelCase__ , encoding="utf-8") as merges_handle:
SCREAMING_SNAKE_CASE_: List[Any] = merges_handle.read().split("\n")[1:-1]
SCREAMING_SNAKE_CASE_: str = [tuple(merge.split()) for merge in bpe_merges]
SCREAMING_SNAKE_CASE_: List[Any] = dict(zip(lowerCAmelCase__ , range(len(lowerCAmelCase__))))
SCREAMING_SNAKE_CASE_: str = {}
SCREAMING_SNAKE_CASE_: Optional[Any] = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
SCREAMING_SNAKE_CASE_: List[Any] = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")
@property
def _SCREAMING_SNAKE_CASE ( self : int):
return len(self.encoder)
def _SCREAMING_SNAKE_CASE ( self : int):
return dict(self.encoder , **self.added_tokens_encoder)
def _SCREAMING_SNAKE_CASE ( self : str , lowerCAmelCase__ : List[str]):
if token in self.cache:
return self.cache[token]
SCREAMING_SNAKE_CASE_: Optional[int] = tuple(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[int] = get_pairs(lowerCAmelCase__)
if not pairs:
return token
while True:
SCREAMING_SNAKE_CASE_: int = min(lowerCAmelCase__ , key=lambda lowerCAmelCase__: self.bpe_ranks.get(lowerCAmelCase__ , float("inf")))
if bigram not in self.bpe_ranks:
break
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: List[Any] = bigram
SCREAMING_SNAKE_CASE_: Optional[int] = []
SCREAMING_SNAKE_CASE_: List[Any] = 0
while i < len(lowerCAmelCase__):
try:
SCREAMING_SNAKE_CASE_: List[Any] = word.index(lowerCAmelCase__ , lowerCAmelCase__)
except ValueError:
new_word.extend(word[i:])
break
else:
new_word.extend(word[i:j])
SCREAMING_SNAKE_CASE_: Tuple = j
if word[i] == first and i < len(lowerCAmelCase__) - 1 and word[i + 1] == second:
new_word.append(first + second)
i += 2
else:
new_word.append(word[i])
i += 1
SCREAMING_SNAKE_CASE_: str = tuple(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: int = new_word
if len(lowerCAmelCase__) == 1:
break
else:
SCREAMING_SNAKE_CASE_: Dict = get_pairs(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Union[str, Any] = " ".join(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Tuple = word
return word
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase__ : Tuple):
SCREAMING_SNAKE_CASE_: Optional[Any] = []
for token in re.findall(self.pat , lowerCAmelCase__):
SCREAMING_SNAKE_CASE_: str = "".join(
self.byte_encoder[b] for b in token.encode("utf-8")) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(lowerCAmelCase__).split(" "))
return bpe_tokens
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase__ : Tuple):
return self.encoder.get(lowerCAmelCase__ , self.encoder.get(self.unk_token))
def _SCREAMING_SNAKE_CASE ( self : str , lowerCAmelCase__ : Union[str, Any]):
return self.decoder.get(lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase__ : Optional[int]):
SCREAMING_SNAKE_CASE_: Any = "".join(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Union[str, Any] = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8" , errors=self.errors)
return text
def _SCREAMING_SNAKE_CASE ( self : Dict , lowerCAmelCase__ : str , lowerCAmelCase__ : Optional[str] = None):
if not os.path.isdir(lowerCAmelCase__):
logger.error(F"Vocabulary path ({save_directory}) should be a directory")
return
SCREAMING_SNAKE_CASE_: Any = os.path.join(
lowerCAmelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
SCREAMING_SNAKE_CASE_: Any = os.path.join(
lowerCAmelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"])
with open(lowerCAmelCase__ , "w" , encoding="utf-8") as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowerCAmelCase__ , ensure_ascii=lowerCAmelCase__) + "\n")
SCREAMING_SNAKE_CASE_: List[Any] = 0
with open(lowerCAmelCase__ , "w" , encoding="utf-8") as writer:
writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv: kv[1]):
if index != token_index:
logger.warning(
F"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
" Please check that the tokenizer is not corrupted!")
SCREAMING_SNAKE_CASE_: List[Any] = token_index
writer.write(" ".join(lowerCAmelCase__) + "\n")
index += 1
return vocab_file, merge_file
def _SCREAMING_SNAKE_CASE ( self : List[str] , lowerCAmelCase__ : List[int] , lowerCAmelCase__ : Optional[List[int]] = None):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
SCREAMING_SNAKE_CASE_: Optional[int] = [self.cls_token_id]
SCREAMING_SNAKE_CASE_: Dict = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase__ : List[int] , lowerCAmelCase__ : Optional[List[int]] = None , lowerCAmelCase__ : bool = False):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCAmelCase__ , token_ids_a=lowerCAmelCase__ , already_has_special_tokens=lowerCAmelCase__)
if token_ids_a is None:
return [1] + ([0] * len(lowerCAmelCase__)) + [1]
return [1] + ([0] * len(lowerCAmelCase__)) + [1, 1] + ([0] * len(lowerCAmelCase__)) + [1]
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase__ : List[int] , lowerCAmelCase__ : Optional[List[int]] = None):
SCREAMING_SNAKE_CASE_: Optional[int] = [self.sep_token_id]
SCREAMING_SNAKE_CASE_: int = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
def _SCREAMING_SNAKE_CASE ( self : int , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : List[str]=False , **lowerCAmelCase__ : str):
SCREAMING_SNAKE_CASE_: List[Any] = kwargs.pop("add_prefix_space" , self.add_prefix_space)
if (is_split_into_words or add_prefix_space) and (len(lowerCAmelCase__) > 0 and not text[0].isspace()):
SCREAMING_SNAKE_CASE_: Optional[Any] = " " + text
return (text, kwargs)
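
# Hedged illustration of the byte-level BPE helpers defined near the top of
# this file (the class body refers to them as `bytes_to_unicode` and
# `get_pairs`); the expected values below follow from their definitions.
byte_encoder = bytes_to_unicode()
print(len(byte_encoder))           # 256: every byte gets a printable stand-in
print(byte_encoder[ord(" ")])      # 'Ġ', the space marker seen in merges.txt
print(get_pairs(("l", "o", "w")))  # {('l', 'o'), ('o', 'w')}: the candidate merges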
from __future__ import annotations
import os
from typing import Any
import requests
_snake_case = """https://api.github.com"""
# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
AUTHENTICATED_USER_ENDPOINT = BASE_URL + """/user"""
# https://github.com/settings/tokens
USER_TOKEN = os.environ.get("USER_TOKEN", "")
def fetch_github_info( auth_token ):
    _A : dict = {
        "Authorization": f'''token {auth_token}''',
        "Accept": "application/vnd.github.v3+json",
    }
    return requests.get(AUTHENTICATED_USER_ENDPOINT,headers=_A ).json()
if __name__ == "__main__": # pragma: no cover
if USER_TOKEN:
for key, value in fetch_github_info(USER_TOKEN).items():
print(f"""{key}: {value}""")
else:
raise ValueError("'USER_TOKEN' field cannot be empty.")
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class __lowercase ( unittest.TestCase ):
"""simple docstring"""
def _SCREAMING_SNAKE_CASE ( self : List[str]):
# For consistency across different places the DisjunctiveConstraint is called,
# dc.token_ids is a list of integers. It is also initialized only by integers.
SCREAMING_SNAKE_CASE_: Optional[Any] = [[1, 2, 4], [1, 2, 3, 4]]
SCREAMING_SNAKE_CASE_: Any = DisjunctiveConstraint(lowerCAmelCase__)
self.assertTrue(isinstance(dc.token_ids , lowerCAmelCase__))
with self.assertRaises(lowerCAmelCase__):
DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]]))
with self.assertRaises(lowerCAmelCase__):
DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])])
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
        # We can't have constraints that are complete subsets of another. This leads to a perverse
# interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint?
# It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially
# fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm
# will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it).
SCREAMING_SNAKE_CASE_: Union[str, Any] = [[1, 2], [1, 2, 3, 4]]
with self.assertRaises(lowerCAmelCase__):
DisjunctiveConstraint(lowerCAmelCase__) # fails here
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
SCREAMING_SNAKE_CASE_: List[str] = [[1, 2, 3], [1, 2, 4]]
SCREAMING_SNAKE_CASE_: Tuple = DisjunctiveConstraint(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: int = dc.update(1)
SCREAMING_SNAKE_CASE_: Dict = stepped is True and completed is False and reset is False
self.assertTrue(lowerCAmelCase__)
self.assertTrue(not dc.completed)
self.assertTrue(dc.current_seq == [1])
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: str = dc.update(2)
SCREAMING_SNAKE_CASE_: Optional[Any] = stepped is True and completed is False and reset is False
self.assertTrue(lowerCAmelCase__)
self.assertTrue(not dc.completed)
self.assertTrue(dc.current_seq == [1, 2])
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Tuple = dc.update(3)
SCREAMING_SNAKE_CASE_: Tuple = stepped is True and completed is True and reset is False
self.assertTrue(lowerCAmelCase__)
self.assertTrue(dc.completed) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 3])
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
SCREAMING_SNAKE_CASE_: Union[str, Any] = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
SCREAMING_SNAKE_CASE_: List[Any] = DisjunctiveConstraint(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[Any] = dc.update(1)
self.assertTrue(not dc.completed)
self.assertTrue(dc.current_seq == [1])
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Dict = dc.update(2)
self.assertTrue(not dc.completed)
self.assertTrue(dc.current_seq == [1, 2])
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Tuple = dc.update(4)
self.assertTrue(not dc.completed)
self.assertTrue(dc.current_seq == [1, 2, 4])
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[Any] = dc.update(5)
self.assertTrue(dc.completed) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 4, 5])
dc.reset()
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: List[Any] = dc.update(1)
self.assertTrue(not dc.completed)
self.assertTrue(dc.remaining() == 3)
self.assertTrue(dc.current_seq == [1])
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Tuple = dc.update(2)
self.assertTrue(not dc.completed)
self.assertTrue(dc.remaining() == 2)
self.assertTrue(dc.current_seq == [1, 2])
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Union[str, Any] = dc.update(5)
self.assertTrue(dc.completed) # Completed!
self.assertTrue(dc.remaining() == 0)
self.assertTrue(dc.current_seq == [1, 2, 5])
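
# Hedged sketch of how a DisjunctiveConstraint is consumed in practice, via the
# `constraints` argument of `generate` (constrained beam search). The gpt2
# checkpoint and the word forms are illustrative choices, not from these tests.
from transformers import GPT2LMHeadModel, GPT2Tokenizer
from transformers.generation import DisjunctiveConstraint

tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
model = GPT2LMHeadModel.from_pretrained("gpt2")
# Either surface form satisfies the constraint, mirroring the nested id lists above.
flexible_ids = tokenizer([" screams", " screamed"], add_special_tokens=False).input_ids
constraint = DisjunctiveConstraint(flexible_ids)
input_ids = tokenizer("The cat", return_tensors="pt").input_ids
output = model.generate(input_ids, constraints=[constraint], num_beams=5, max_new_tokens=20)
print(tokenizer.decode(output[0], skip_special_tokens=True))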
from ....configuration_utils import PretrainedConfig
from ....utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"""speechbrain/m-ctc-t-large""": """https://huggingface.co/speechbrain/m-ctc-t-large/resolve/main/config.json""",
# See all M-CTC-T models at https://huggingface.co/models?filter=mctct
}
class UpperCAmelCase_ ( UpperCAmelCase_ ):
'''simple docstring'''
__A : int = '''mctct'''
def __init__( self , __A=8065 , __A=1536 , __A=36 , __A=6144 , __A=4 , __A=384 , __A=920 , __A=1e-5 , __A=0.3 , __A="relu" , __A=0.02 , __A=0.3 , __A=0.3 , __A=1 , __A=0 , __A=2 , __A=1 , __A=0.3 , __A=1 , __A=(7,) , __A=(3,) , __A=80 , __A=1 , __A=None , __A="sum" , __A=False , **__A , ):
"""simple docstring"""
super().__init__(**lowerCAmelCase__ , pad_token_id=lowerCAmelCase__ , bos_token_id=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ )
lowerCamelCase : Optional[int] = vocab_size
lowerCamelCase : Optional[int] = hidden_size
lowerCamelCase : List[str] = num_hidden_layers
lowerCamelCase : Dict = intermediate_size
lowerCamelCase : int = num_attention_heads
lowerCamelCase : Optional[Any] = attention_head_dim
lowerCamelCase : str = max_position_embeddings
lowerCamelCase : Optional[int] = layer_norm_eps
lowerCamelCase : Optional[Any] = layerdrop
lowerCamelCase : int = hidden_act
lowerCamelCase : Tuple = initializer_range
lowerCamelCase : List[Any] = hidden_dropout_prob
lowerCamelCase : Optional[int] = attention_probs_dropout_prob
lowerCamelCase : str = pad_token_id
lowerCamelCase : str = bos_token_id
lowerCamelCase : Optional[int] = eos_token_id
lowerCamelCase : Tuple = conv_glu_dim
lowerCamelCase : List[Any] = conv_dropout
lowerCamelCase : List[str] = num_conv_layers
lowerCamelCase : List[Any] = input_feat_per_channel
lowerCamelCase : Dict = input_channels
lowerCamelCase : str = conv_channels
lowerCamelCase : Union[str, Any] = ctc_loss_reduction
lowerCamelCase : Union[str, Any] = ctc_zero_infinity
# prevents config testing fail with exporting to json
lowerCamelCase : Union[str, Any] = list(lowerCAmelCase__ )
lowerCamelCase : int = list(lowerCAmelCase__ )
if len(self.conv_kernel ) != self.num_conv_layers:
raise ValueError(
"Configuration for convolutional module is incorrect. "
"It is required that `len(config.conv_kernel)` == `config.num_conv_layers` "
F"""but is `len(config.conv_kernel) = {len(self.conv_kernel )}`, """
F"""`config.num_conv_layers = {self.num_conv_layers}`.""" )
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
lowerCAmelCase : Optional[Any] = get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
@require_tokenizers
class __lowercase ( UpperCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
_UpperCAmelCase : Optional[Any] = XGLMTokenizer
_UpperCAmelCase : List[Any] = XGLMTokenizerFast
_UpperCAmelCase : Optional[int] = True
_UpperCAmelCase : Tuple = True
def _SCREAMING_SNAKE_CASE ( self : Tuple):
super().setUp()
# We have a SentencePiece fixture for testing
SCREAMING_SNAKE_CASE_: List[Any] = XGLMTokenizer(lowerCAmelCase__ , keep_accents=lowerCAmelCase__)
tokenizer.save_pretrained(self.tmpdirname)
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
SCREAMING_SNAKE_CASE_: Optional[Any] = "<pad>"
SCREAMING_SNAKE_CASE_: int = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase__) , lowerCAmelCase__)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase__) , lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : List[str]):
SCREAMING_SNAKE_CASE_: Optional[int] = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , "<s>")
self.assertEqual(vocab_keys[1] , "<pad>")
self.assertEqual(len(lowerCAmelCase__) , 1008)
def _SCREAMING_SNAKE_CASE ( self : Any):
self.assertEqual(self.get_tokenizer().vocab_size , 1008)
def _SCREAMING_SNAKE_CASE ( self : Tuple):
SCREAMING_SNAKE_CASE_: Optional[int] = XGLMTokenizer(lowerCAmelCase__ , keep_accents=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Union[str, Any] = tokenizer.tokenize("This is a test")
self.assertListEqual(lowerCAmelCase__ , ["▁This", "▁is", "▁a", "▁t", "est"])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCAmelCase__) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
SCREAMING_SNAKE_CASE_: List[str] = tokenizer.tokenize("I was born in 92000, and this is falsé.")
self.assertListEqual(
lowerCAmelCase__ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
SCREAMING_SNAKE_CASE_: Optional[Any] = tokenizer.convert_tokens_to_ids(lowerCAmelCase__)
self.assertListEqual(
lowerCAmelCase__ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
SCREAMING_SNAKE_CASE_: List[Any] = tokenizer.convert_ids_to_tokens(lowerCAmelCase__)
self.assertListEqual(
lowerCAmelCase__ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
@cached_property
def _SCREAMING_SNAKE_CASE ( self : Any):
return XGLMTokenizer.from_pretrained("facebook/xglm-564M")
def _SCREAMING_SNAKE_CASE ( self : str):
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(lowerCAmelCase__ , f.name)
SCREAMING_SNAKE_CASE_: Tuple = XGLMTokenizer(f.name , keep_accents=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[str] = pickle.dumps(lowerCAmelCase__)
pickle.loads(lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : str):
if not self.test_rust_tokenizer:
return
SCREAMING_SNAKE_CASE_: Dict = self.get_tokenizer()
SCREAMING_SNAKE_CASE_: List[str] = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE_: Any = "I was born in 92000, and this is falsé."
SCREAMING_SNAKE_CASE_: Union[str, Any] = tokenizer.tokenize(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: int = rust_tokenizer.tokenize(lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[str] = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[Any] = rust_tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[int] = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE_: str = tokenizer.encode(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Union[str, Any] = rust_tokenizer.encode(lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__)
@slow
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
SCREAMING_SNAKE_CASE_: Dict = "Hello World!"
SCREAMING_SNAKE_CASE_: Union[str, Any] = [2, 3_1227, 4447, 35]
self.assertListEqual(lowerCAmelCase__ , self.big_tokenizer.encode(lowerCAmelCase__))
@slow
def _SCREAMING_SNAKE_CASE ( self : int):
SCREAMING_SNAKE_CASE_: Union[str, Any] = (
"This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
" add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth"
)
# fmt: off
SCREAMING_SNAKE_CASE_: Optional[Any] = [2, 1018, 67, 11, 1988, 2617, 5631, 278, 11, 3407, 48, 7_1630, 2_8085, 4, 3234, 157, 13, 6, 5, 6, 4, 3526, 768, 15, 659, 57, 298, 3983, 864, 129, 21, 6, 5, 1_3675, 377, 652, 7580, 1_0341, 155, 2817, 422, 1666, 7, 1674, 53, 113, 20_2277, 1_7892, 33, 60, 87, 4, 3234, 157, 61, 2667, 5_2376, 19, 88, 23, 735]
# fmt: on
self.assertListEqual(lowerCAmelCase__ , self.big_tokenizer.encode(lowerCAmelCase__))
@slow
def _SCREAMING_SNAKE_CASE ( self : int):
# fmt: off
SCREAMING_SNAKE_CASE_: str = {
"input_ids": [[2, 10_8825, 1163, 15, 8_8010, 473, 1_5898, 157, 1_3672, 1857, 312, 8, 23_8021, 1163, 53, 1_3672, 1857, 312, 8, 5_3283, 18_2396, 8, 1_8566, 16, 3_6733, 4101, 8, 230, 24_4017, 12_2553, 7, 15, 13_2597, 4, 293, 1_2511, 7610, 4, 3414, 13_2597, 9, 4, 3_2361, 362, 4, 734, 2_8512, 3_2569, 18, 4, 3_2361, 2_6096, 1_4982, 73, 1_8715, 2_1433, 23_5261, 15, 492, 1_2427, 16, 53, 1_8715, 2_1433, 6_5454, 15, 2_3659, 563, 16, 278, 597, 2843, 595, 7931, 18_2396, 6_4186, 22, 886, 595, 13_2981, 53, 2_5540, 3449, 4_3982, 3_9901, 5951, 878, 330, 4, 2_7694, 8_0269, 312, 53, 6517, 1_1780, 611, 2_0408, 5], [2, 6, 13_2597, 67, 4_2897, 33, 592, 8, 16_3729, 2_5540, 361, 13_6997, 10_9514, 17_3230, 7, 501, 60, 10_2913, 196, 5631, 235, 6_3243, 473, 6, 23_1757, 74, 5277, 7905, 53, 3095, 3_7317, 22, 454, 18_3874, 5], [2, 268, 3_1298, 4_6530, 6, 13_2935, 4_3831, 7, 597, 32, 24, 3688, 9865, 5]],
"attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCAmelCase__ , model_name="facebook/xglm-564M" , padding=lowerCAmelCase__ , )
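
# Hedged standalone check mirroring the first slow test above (checkpoint and
# expected token ids are taken directly from that test).
from transformers import XGLMTokenizer

big_tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
assert big_tokenizer.encode("Hello World!") == [2, 31227, 4447, 35]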
'''simple docstring'''
import os
import pytest
from transformers.dynamic_module_utils import get_imports
lowerCamelCase = """
import os
"""
lowerCamelCase = """
def foo():
import os
return False
"""
lowerCamelCase = """
def foo():
def bar():
if True:
import os
return False
return bar()
"""
lowerCamelCase = """
import os
try:
import bar
except ImportError:
raise ValueError()
"""
lowerCamelCase = """
import os
def foo():
try:
import bar
except ImportError:
raise ValueError()
"""
lowerCamelCase = """
import os
try:
import bar
except (ImportError, AttributeError):
raise ValueError()
"""
lowerCamelCase = """
import os
try:
import bar
except ImportError as e:
raise ValueError()
"""
lowerCamelCase = """
import os
try:
import bar
except:
raise ValueError()
"""
lowerCamelCase = """
import os
try:
import bar
import baz
except ImportError:
raise ValueError()
"""
lowerCamelCase = """
import os
try:
import bar
import baz
except ImportError:
x = 1
raise ValueError()
"""
CASES = [
TOP_LEVEL_IMPORT,
IMPORT_IN_FUNCTION,
DEEPLY_NESTED_IMPORT,
TOP_LEVEL_TRY_IMPORT,
GENERIC_EXCEPT_IMPORT,
MULTILINE_TRY_IMPORT,
MULTILINE_BOTH_IMPORT,
MULTIPLE_EXCEPTS_IMPORT,
EXCEPT_AS_IMPORT,
TRY_IMPORT_IN_FUNCTION,
]
@pytest.mark.parametrize('case' , CASES )
def _A ( tmp_path , case ):
    """simple docstring"""
    __lowercase =os.path.join(tmp_path , 'test_file.py' )
    with open(__lowercase , 'w' ) as _tmp_file:
        _tmp_file.write(case )
    __lowercase =get_imports(__lowercase )
    assert __lowercase == ["os"]
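
# Hedged direct call (outside pytest) showing the same behavior on the first
# fixture above; the temporary-file handling is an illustrative choice.
import tempfile

with tempfile.NamedTemporaryFile('w' , suffix='.py' , delete=False ) as f:
    f.write(TOP_LEVEL_IMPORT )
print(get_imports(f.name ))  # ['os']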
def hubble_parameter( hubble_constant , radiation_density , matter_density , dark_energy , redshift , ):
    parameters = [redshift, radiation_density, matter_density, dark_energy]
    if any(p < 0 for p in parameters ):
        raise ValueError("All input parameters must be positive" )
    if any(p > 1 for p in parameters[1:4] ):
        raise ValueError("Relative densities cannot be greater than one" )
    else:
        curvature = 1 - (matter_density + radiation_density + dark_energy)
        e_a = (
            radiation_density * (redshift + 1) ** 4
            + matter_density * (redshift + 1) ** 3
            + curvature * (redshift + 1) ** 2
            + dark_energy
        )
        hubble = hubble_constant * e_a ** (1 / 2)
        return hubble
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# demo LCDM approximation
    matter_density : float = 0.3
print(
hubble_parameter(
hubble_constant=68.3,
radiation_density=1E-4,
matter_density=matter_density,
dark_energy=1 - matter_density,
redshift=0,
)
)
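    # Hedged sanity check with the same demo values: curvature absorbs the
    # residual density, so E(z=0) = 1 and the function should return the
    # Hubble constant itself.
    assert abs(hubble_parameter(68.3, 1E-4, 0.3, 1 - 0.3, 0) - 68.3) < 1E-9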
def solution( limit : int = 1_0_0_0_0_0_0 ) -> int:
    """simple docstring"""
    primes = set(range(3 , limit , 2 ) )
    primes.add(2 )
    for p in range(3 , limit , 2 ):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p , limit , p ) ) )
    phi = [float(n ) for n in range(limit + 1 )]
    for p in primes:
        for n in range(p , limit + 1 , p ):
            phi[n] *= 1 - 1 / p
    return int(sum(phi[2:] ) )
if __name__ == "__main__":
print(F'''{solution() = }''')
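    # Hedged brute-force cross-check for a small limit (the helper below is
    # mine; phi(n) is counted directly from the gcd definition).
    from math import gcd

    def totient_sum_bruteforce( limit : int ) -> int:
        return sum(
            sum(1 for k in range(1 , n ) if gcd(n , k ) == 1 )  # phi(n) by definition
            for n in range(2 , limit + 1 )
        )

    print(totient_sum_bruteforce(8))  # 21 == phi(2) + ... + phi(8)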
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_va import MobileNetVaConfig
lowerCAmelCase : int = logging.get_logger(__name__)
# General docstring
lowerCAmelCase : int = """MobileNetV1Config"""
# Base docstring
lowerCAmelCase : List[Any] = """google/mobilenet_v1_1.0_224"""
lowerCAmelCase : Dict = [1, 1024, 7, 7]
# Image classification docstring
lowerCAmelCase : Union[str, Any] = """google/mobilenet_v1_1.0_224"""
lowerCAmelCase : Any = """tabby, tabby cat"""
lowerCAmelCase : List[Any] = [
"""google/mobilenet_v1_1.0_224""",
"""google/mobilenet_v1_0.75_192""",
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def A_ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=None ):
SCREAMING_SNAKE_CASE_: List[str] = {}
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
SCREAMING_SNAKE_CASE_: Any = model.mobilenet_va
else:
SCREAMING_SNAKE_CASE_: int = model
SCREAMING_SNAKE_CASE_: Dict = "MobilenetV1/Conv2d_0/"
SCREAMING_SNAKE_CASE_: str = backbone.conv_stem.convolution.weight
SCREAMING_SNAKE_CASE_: List[str] = backbone.conv_stem.normalization.bias
SCREAMING_SNAKE_CASE_: int = backbone.conv_stem.normalization.weight
SCREAMING_SNAKE_CASE_: List[str] = backbone.conv_stem.normalization.running_mean
SCREAMING_SNAKE_CASE_: Optional[int] = backbone.conv_stem.normalization.running_var
for i in range(13 ):
SCREAMING_SNAKE_CASE_: List[str] = i + 1
SCREAMING_SNAKE_CASE_: Optional[int] = i * 2
SCREAMING_SNAKE_CASE_: Any = backbone.layer[pt_index]
SCREAMING_SNAKE_CASE_: Any = f"MobilenetV1/Conv2d_{tf_index}_depthwise/"
SCREAMING_SNAKE_CASE_: Any = pointer.convolution.weight
SCREAMING_SNAKE_CASE_: Any = pointer.normalization.bias
SCREAMING_SNAKE_CASE_: str = pointer.normalization.weight
SCREAMING_SNAKE_CASE_: Dict = pointer.normalization.running_mean
SCREAMING_SNAKE_CASE_: Optional[Any] = pointer.normalization.running_var
SCREAMING_SNAKE_CASE_: Tuple = backbone.layer[pt_index + 1]
SCREAMING_SNAKE_CASE_: List[str] = f"MobilenetV1/Conv2d_{tf_index}_pointwise/"
SCREAMING_SNAKE_CASE_: int = pointer.convolution.weight
SCREAMING_SNAKE_CASE_: Any = pointer.normalization.bias
SCREAMING_SNAKE_CASE_: Optional[int] = pointer.normalization.weight
SCREAMING_SNAKE_CASE_: Optional[Any] = pointer.normalization.running_mean
SCREAMING_SNAKE_CASE_: Dict = pointer.normalization.running_var
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
SCREAMING_SNAKE_CASE_: List[str] = "MobilenetV1/Logits/Conv2d_1c_1x1/"
SCREAMING_SNAKE_CASE_: Optional[Any] = model.classifier.weight
SCREAMING_SNAKE_CASE_: Tuple = model.classifier.bias
return tf_to_pt_map
def A_ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
try:
import numpy as np
import tensorflow as tf
except ImportError:
logger.error(
"Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see "
"https://www.tensorflow.org/install/ for installation instructions." )
raise
# Load weights from TF model
SCREAMING_SNAKE_CASE_: int = tf.train.list_variables(_UpperCAmelCase )
SCREAMING_SNAKE_CASE_: int = {}
for name, shape in init_vars:
logger.info(f"Loading TF weight {name} with shape {shape}" )
SCREAMING_SNAKE_CASE_: Any = tf.train.load_variable(_UpperCAmelCase , _UpperCAmelCase )
SCREAMING_SNAKE_CASE_: Union[str, Any] = array
# Build TF to PyTorch weights loading map
SCREAMING_SNAKE_CASE_: Optional[Any] = _build_tf_to_pytorch_map(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
for name, pointer in tf_to_pt_map.items():
logger.info(f"Importing {name}" )
if name not in tf_weights:
logger.info(f"{name} not in tf pre-trained weights, skipping" )
continue
SCREAMING_SNAKE_CASE_: int = tf_weights[name]
if "depthwise_weights" in name:
logger.info("Transposing depthwise" )
SCREAMING_SNAKE_CASE_: int = np.transpose(_UpperCAmelCase , (2, 3, 0, 1) )
elif "weights" in name:
logger.info("Transposing" )
if len(pointer.shape ) == 2: # copying into linear layer
SCREAMING_SNAKE_CASE_: List[str] = array.squeeze().transpose()
else:
SCREAMING_SNAKE_CASE_: Any = np.transpose(_UpperCAmelCase , (3, 2, 0, 1) )
if pointer.shape != array.shape:
raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched" )
logger.info(f"Initialize PyTorch weight {name} {array.shape}" )
SCREAMING_SNAKE_CASE_: int = torch.from_numpy(_UpperCAmelCase )
tf_weights.pop(_UpperCAmelCase , _UpperCAmelCase )
tf_weights.pop(name + "/RMSProp" , _UpperCAmelCase )
tf_weights.pop(name + "/RMSProp_1" , _UpperCAmelCase )
tf_weights.pop(name + "/ExponentialMovingAverage" , _UpperCAmelCase )
logger.info(f"Weights not copied to PyTorch model: {', '.join(tf_weights.keys() )}" )
return model
def A_ ( _UpperCAmelCase , _UpperCAmelCase ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: List[Any] = features.shape[-2:]
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: List[str] = conv_layer.stride
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Any = conv_layer.kernel_size
if in_height % stride_height == 0:
SCREAMING_SNAKE_CASE_: int = max(kernel_height - stride_height , 0 )
else:
SCREAMING_SNAKE_CASE_: Tuple = max(kernel_height - (in_height % stride_height) , 0 )
if in_width % stride_width == 0:
SCREAMING_SNAKE_CASE_: str = max(kernel_width - stride_width , 0 )
else:
SCREAMING_SNAKE_CASE_: Dict = max(kernel_width - (in_width % stride_width) , 0 )
SCREAMING_SNAKE_CASE_: str = pad_along_width // 2
SCREAMING_SNAKE_CASE_: Union[str, Any] = pad_along_width - pad_left
SCREAMING_SNAKE_CASE_: int = pad_along_height // 2
SCREAMING_SNAKE_CASE_: Tuple = pad_along_height - pad_top
SCREAMING_SNAKE_CASE_: Union[str, Any] = (pad_left, pad_right, pad_top, pad_bottom)
return nn.functional.pad(_UpperCAmelCase , _UpperCAmelCase , "constant" , 0.0 )
class __lowercase ( nn.Module ):
"""simple docstring"""
def __init__( self : Optional[int] , lowerCAmelCase__ : MobileNetVaConfig , lowerCAmelCase__ : int , lowerCAmelCase__ : int , lowerCAmelCase__ : int , lowerCAmelCase__ : Optional[int] = 1 , lowerCAmelCase__ : Optional[int] = 1 , lowerCAmelCase__ : bool = False , lowerCAmelCase__ : Optional[bool] = True , lowerCAmelCase__ : Optional[bool or str] = True , ):
super().__init__()
SCREAMING_SNAKE_CASE_: Optional[int] = config
if in_channels % groups != 0:
raise ValueError(F"Input channels ({in_channels}) are not divisible by {groups} groups.")
if out_channels % groups != 0:
raise ValueError(F"Output channels ({out_channels}) are not divisible by {groups} groups.")
SCREAMING_SNAKE_CASE_: int = 0 if config.tf_padding else int((kernel_size - 1) / 2)
SCREAMING_SNAKE_CASE_: Union[str, Any] = nn.Convad(
in_channels=lowerCAmelCase__ , out_channels=lowerCAmelCase__ , kernel_size=lowerCAmelCase__ , stride=lowerCAmelCase__ , padding=lowerCAmelCase__ , groups=lowerCAmelCase__ , bias=lowerCAmelCase__ , padding_mode="zeros" , )
if use_normalization:
SCREAMING_SNAKE_CASE_: str = nn.BatchNormad(
num_features=lowerCAmelCase__ , eps=config.layer_norm_eps , momentum=0.9997 , affine=lowerCAmelCase__ , track_running_stats=lowerCAmelCase__ , )
else:
SCREAMING_SNAKE_CASE_: str = None
if use_activation:
if isinstance(lowerCAmelCase__ , lowerCAmelCase__):
SCREAMING_SNAKE_CASE_: Dict = ACTaFN[use_activation]
elif isinstance(config.hidden_act , lowerCAmelCase__):
SCREAMING_SNAKE_CASE_: Dict = ACTaFN[config.hidden_act]
else:
SCREAMING_SNAKE_CASE_: Any = config.hidden_act
else:
SCREAMING_SNAKE_CASE_: int = None
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowerCAmelCase__ : torch.Tensor):
if self.config.tf_padding:
SCREAMING_SNAKE_CASE_: Union[str, Any] = apply_tf_padding(lowerCAmelCase__ , self.convolution)
SCREAMING_SNAKE_CASE_: Optional[int] = self.convolution(lowerCAmelCase__)
if self.normalization is not None:
SCREAMING_SNAKE_CASE_: int = self.normalization(lowerCAmelCase__)
if self.activation is not None:
SCREAMING_SNAKE_CASE_: List[Any] = self.activation(lowerCAmelCase__)
return features
class __lowercase ( UpperCAmelCase_ ):
"""simple docstring"""
_UpperCAmelCase : List[str] = MobileNetVaConfig
_UpperCAmelCase : List[Any] = load_tf_weights_in_mobilenet_va
_UpperCAmelCase : List[Any] = '''mobilenet_v1'''
_UpperCAmelCase : int = '''pixel_values'''
_UpperCAmelCase : List[Any] = False
def _SCREAMING_SNAKE_CASE ( self : List[str] , lowerCAmelCase__ : Union[nn.Linear, nn.Convad]):
if isinstance(lowerCAmelCase__ , (nn.Linear, nn.Convad)):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(lowerCAmelCase__ , nn.BatchNormad):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
lowerCAmelCase : Any = R"""
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
lowerCAmelCase : List[str] = R"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`MobileNetV1ImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
'''The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.''' , UpperCAmelCase_ , )
class __lowercase ( UpperCAmelCase_ ):
"""simple docstring"""
def __init__( self : Dict , lowerCAmelCase__ : MobileNetVaConfig , lowerCAmelCase__ : bool = True):
super().__init__(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Dict = config
SCREAMING_SNAKE_CASE_: Union[str, Any] = 32
SCREAMING_SNAKE_CASE_: Dict = max(int(depth * config.depth_multiplier) , config.min_depth)
SCREAMING_SNAKE_CASE_: Tuple = MobileNetVaConvLayer(
lowerCAmelCase__ , in_channels=config.num_channels , out_channels=lowerCAmelCase__ , kernel_size=3 , stride=2 , )
SCREAMING_SNAKE_CASE_: Optional[int] = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]
SCREAMING_SNAKE_CASE_: str = nn.ModuleList()
for i in range(13):
SCREAMING_SNAKE_CASE_: List[Any] = out_channels
if strides[i] == 2 or i == 0:
depth *= 2
SCREAMING_SNAKE_CASE_: str = max(int(depth * config.depth_multiplier) , config.min_depth)
self.layer.append(
MobileNetVaConvLayer(
lowerCAmelCase__ , in_channels=lowerCAmelCase__ , out_channels=lowerCAmelCase__ , kernel_size=3 , stride=strides[i] , groups=lowerCAmelCase__ , ))
self.layer.append(
MobileNetVaConvLayer(
lowerCAmelCase__ , in_channels=lowerCAmelCase__ , out_channels=lowerCAmelCase__ , kernel_size=1 , ))
SCREAMING_SNAKE_CASE_: List[str] = nn.AdaptiveAvgPoolad((1, 1)) if add_pooling_layer else None
# Initialize weights and apply final processing
self.post_init()
def _SCREAMING_SNAKE_CASE ( self : int , lowerCAmelCase__ : str):
raise NotImplementedError
@add_start_docstrings_to_model_forward(lowerCAmelCase__)
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=lowerCAmelCase__ , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def _SCREAMING_SNAKE_CASE ( self : List[str] , lowerCAmelCase__ : Optional[torch.Tensor] = None , lowerCAmelCase__ : Optional[bool] = None , lowerCAmelCase__ : Optional[bool] = None , ):
SCREAMING_SNAKE_CASE_: Optional[int] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
SCREAMING_SNAKE_CASE_: Any = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError("You have to specify pixel_values")
SCREAMING_SNAKE_CASE_: Optional[Any] = self.conv_stem(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[Any] = () if output_hidden_states else None
for i, layer_module in enumerate(self.layer):
SCREAMING_SNAKE_CASE_: Tuple = layer_module(lowerCAmelCase__)
if output_hidden_states:
SCREAMING_SNAKE_CASE_: Optional[int] = all_hidden_states + (hidden_states,)
SCREAMING_SNAKE_CASE_: Optional[Any] = hidden_states
if self.pooler is not None:
SCREAMING_SNAKE_CASE_: int = torch.flatten(self.pooler(lowerCAmelCase__) , start_dim=1)
else:
SCREAMING_SNAKE_CASE_: List[str] = None
if not return_dict:
return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None)
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=lowerCAmelCase__ , pooler_output=lowerCAmelCase__ , hidden_states=lowerCAmelCase__ , )
@add_start_docstrings(
'''
MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
ImageNet.
''' , UpperCAmelCase_ , )
class __lowercase ( UpperCAmelCase_ ):
"""simple docstring"""
def __init__( self : Any , lowerCAmelCase__ : MobileNetVaConfig):
super().__init__(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[Any] = config.num_labels
SCREAMING_SNAKE_CASE_: Dict = MobileNetVaModel(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Tuple = self.mobilenet_va.layer[-1].convolution.out_channels
# Classifier head
SCREAMING_SNAKE_CASE_: str = nn.Dropout(config.classifier_dropout_prob , inplace=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[Any] = nn.Linear(lowerCAmelCase__ , config.num_labels) if config.num_labels > 0 else nn.Identity()
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowerCAmelCase__)
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=lowerCAmelCase__ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def _SCREAMING_SNAKE_CASE ( self : List[str] , lowerCAmelCase__ : Optional[torch.Tensor] = None , lowerCAmelCase__ : Optional[bool] = None , lowerCAmelCase__ : Optional[torch.Tensor] = None , lowerCAmelCase__ : Optional[bool] = None , ):
SCREAMING_SNAKE_CASE_: List[str] = return_dict if return_dict is not None else self.config.use_return_dict
SCREAMING_SNAKE_CASE_: List[str] = self.mobilenet_va(lowerCAmelCase__ , output_hidden_states=lowerCAmelCase__ , return_dict=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[Any] = outputs.pooler_output if return_dict else outputs[1]
SCREAMING_SNAKE_CASE_: Tuple = self.classifier(self.dropout(lowerCAmelCase__))
SCREAMING_SNAKE_CASE_: Optional[int] = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
SCREAMING_SNAKE_CASE_: List[Any] = "regression"
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
SCREAMING_SNAKE_CASE_: int = "single_label_classification"
else:
SCREAMING_SNAKE_CASE_: str = "multi_label_classification"
if self.config.problem_type == "regression":
SCREAMING_SNAKE_CASE_: Dict = MSELoss()
if self.num_labels == 1:
SCREAMING_SNAKE_CASE_: Any = loss_fct(logits.squeeze() , labels.squeeze())
else:
SCREAMING_SNAKE_CASE_: int = loss_fct(lowerCAmelCase__ , lowerCAmelCase__)
elif self.config.problem_type == "single_label_classification":
SCREAMING_SNAKE_CASE_: Any = CrossEntropyLoss()
SCREAMING_SNAKE_CASE_: Dict = loss_fct(logits.view(-1 , self.num_labels) , labels.view(-1))
elif self.config.problem_type == "multi_label_classification":
SCREAMING_SNAKE_CASE_: Dict = BCEWithLogitsLoss()
SCREAMING_SNAKE_CASE_: Dict = loss_fct(lowerCAmelCase__ , lowerCAmelCase__)
if not return_dict:
SCREAMING_SNAKE_CASE_: int = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(
loss=lowerCAmelCase__ , logits=lowerCAmelCase__ , hidden_states=outputs.hidden_states , )
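
# Hedged usage sketch for the classification model above (the checkpoint name
# comes from the docstring constants in this file; the random tensor stands in
# for a preprocessed image batch).
import torch
from transformers import MobileNetV1ForImageClassification

model = MobileNetV1ForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224")
model.eval()
pixel_values = torch.rand(1, 3, 224, 224)  # stand-in for AutoImageProcessor output
with torch.no_grad():
    logits = model(pixel_values).logits
print(logits.shape)  # (1, num_labels); on a real cat photo the top class is "tabby, tabby cat"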
'''simple docstring'''
import unittest
import torch
from diffusers import DDIMScheduler, DDPMScheduler, UNetaDModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow
a : Any = False
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
def A_ ( self , snake_case=3_2 ):
'''simple docstring'''
set_seed(0 )
UpperCAmelCase : Optional[int] = UNetaDModel(sample_size=lowerCAmelCase__ , in_channels=3 , out_channels=3 )
UpperCAmelCase : str = torch.optim.SGD(model.parameters() , lr=0.0001 )
return model, optimizer
@slow
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Tuple = "cpu" # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
UpperCAmelCase : Dict = DDPMScheduler(
num_train_timesteps=1_0_0_0 , beta_start=0.0001 , beta_end=0.02 , beta_schedule="linear" , clip_sample=lowerCAmelCase__ , )
UpperCAmelCase : Optional[Any] = DDIMScheduler(
num_train_timesteps=1_0_0_0 , beta_start=0.0001 , beta_end=0.02 , beta_schedule="linear" , clip_sample=lowerCAmelCase__ , )
assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps
# shared batches for DDPM and DDIM
set_seed(0 )
UpperCAmelCase : Tuple = [torch.randn((4, 3, 3_2, 3_2) ).clip(-1 , 1 ).to(lowerCAmelCase__ ) for _ in range(4 )]
UpperCAmelCase : List[str] = [torch.randn((4, 3, 3_2, 3_2) ).to(lowerCAmelCase__ ) for _ in range(4 )]
UpperCAmelCase : Union[str, Any] = [torch.randint(0 , 1_0_0_0 , (4,) ).long().to(lowerCAmelCase__ ) for _ in range(4 )]
# train with a DDPM scheduler
UpperCAmelCase : Tuple = self.get_model_optimizer(resolution=3_2 )
model.train().to(lowerCAmelCase__ )
for i in range(4 ):
optimizer.zero_grad()
UpperCAmelCase : Tuple = ddpm_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] )
UpperCAmelCase : Union[str, Any] = model(lowerCAmelCase__ , timesteps[i] ).sample
UpperCAmelCase : str = torch.nn.functional.mse_loss(lowerCAmelCase__ , noise[i] )
loss.backward()
optimizer.step()
del model, optimizer
# recreate the model and optimizer, and retry with DDIM
UpperCAmelCase : Union[str, Any] = self.get_model_optimizer(resolution=3_2 )
model.train().to(lowerCAmelCase__ )
for i in range(4 ):
optimizer.zero_grad()
UpperCAmelCase : List[Any] = ddim_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] )
UpperCAmelCase : List[str] = model(lowerCAmelCase__ , timesteps[i] ).sample
UpperCAmelCase : Optional[int] = torch.nn.functional.mse_loss(lowerCAmelCase__ , noise[i] )
loss.backward()
optimizer.step()
del model, optimizer
self.assertTrue(torch.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1e-5 ) )
self.assertTrue(torch.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1e-5 ) )
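
# Hedged distillation of the single training step the test above repeats: add
# noise at a sampled timestep and regress the model output onto that noise.
# Model/scheduler construction mirrors get_model_optimizer; sizes are
# illustrative, and UNet2DModel is the upstream name for UNetaDModel here.
import torch
import torch.nn.functional as F
from diffusers import DDPMScheduler, UNet2DModel

model = UNet2DModel(sample_size=32, in_channels=3, out_channels=3)
scheduler = DDPMScheduler(num_train_timesteps=1000)
optimizer = torch.optim.SGD(model.parameters(), lr=1e-4)

clean_images = torch.randn(4, 3, 32, 32).clip(-1, 1)
noise = torch.randn_like(clean_images)
timesteps = torch.randint(0, 1000, (4,)).long()

optimizer.zero_grad()
noisy_images = scheduler.add_noise(clean_images, noise, timesteps)  # forward diffusion
noise_pred = model(noisy_images, timesteps).sample                  # predict the noise
loss = F.mse_loss(noise_pred, noise)                                # epsilon objective
loss.backward()
optimizer.step()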
def A_ ( input_str , use_pascal = False ):
    if not isinstance(input_str , str ):
        raise ValueError(f"Expected string as input, found {type(input_str )}" )
    if not isinstance(use_pascal , bool ):
        raise ValueError(f"Expected boolean as use_pascal parameter, found {type(use_pascal )}" )
    words = input_str.split("_" )
    start_index = 0 if use_pascal else 1
    words_to_capitalize = words[start_index:]
    capitalized_words = [word[0].upper() + word[1:] for word in words_to_capitalize]
    initial_word = "" if use_pascal else words[0]
    return "".join([initial_word, *capitalized_words] )
if __name__ == "__main__":
from doctest import testmod
testmod()
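    # Hedged usage examples for the converter above, under the obfuscated
    # name A_ it is defined with:
    print(A_("some_random_string"))        # someRandomString
    print(A_("some_random_string", True))  # SomeRandomString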
import unittest
from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
UpperCamelCase__ = get_tests_dir('fixtures/spiece.model')
@require_sentencepiece
@require_tokenizers
class A ( UpperCAmelCase_ , unittest.TestCase ):
__UpperCAmelCase : int = AlbertTokenizer
__UpperCAmelCase : Tuple = AlbertTokenizerFast
__UpperCAmelCase : int = True
__UpperCAmelCase : List[str] = True
__UpperCAmelCase : Tuple = True
    def setUp(self ):
        """simple docstring"""
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB )
        tokenizer.save_pretrained(self.tmpdirname )

    def get_input_output_texts(self , tokenizer ):
        """simple docstring"""
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text

    def test_convert_token_and_id(self ):
        """simple docstring"""
        token = "<pad>"
        token_id = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
    def test_get_vocab(self ):
        """simple docstring"""
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , "<pad>" )
        self.assertEqual(vocab_keys[1] , "<unk>" )
        self.assertEqual(vocab_keys[-1] , "▁eloquent" )
        self.assertEqual(len(vocab_keys ) , 3_0_0_0_0 )

    def test_vocab_size(self ):
        """simple docstring"""
        self.assertEqual(self.get_tokenizer().vocab_size , 3_0_0_0_0 )
    def test_rust_and_python_full_tokenizers(self ):
        """simple docstring"""
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = "I was born in 92000, and this is falsé."
        tokens = tokenizer.tokenize(sequence )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , rust_tokens )
        ids = tokenizer.encode(sequence , add_special_tokens=False )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )
    def test_full_tokenizer(self ):
        """simple docstring"""
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokens = tokenizer.tokenize("This is a test" )
        self.assertListEqual(tokens , ["▁this", "▁is", "▁a", "▁test"] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [4_8, 2_5, 2_1, 1_2_8_9] )
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé." )
        self.assertListEqual(
            tokens , ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", "."] )
        ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(ids , [3_1, 2_3, 3_8_6, 1_9, 5_6_1, 3_0_5_0, 1_5, 1_7, 4_8, 2_5, 8_2_5_6, 1_8, 1, 9] )
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(
            back_tokens , ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "."] , )
    def test_sequence_builders(self ):
        """simple docstring"""
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB )
        text = tokenizer.encode("sequence builders" )
        text_a = tokenizer.encode("multi-sequence build" )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a )
        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
            tokenizer.sep_token_id
        ]
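        # i.e. the assumed layout is BERT/ALBERT-style: [CLS] A [SEP] for a single
        # sequence and [CLS] A [SEP] B [SEP] for a pair.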
@slow
    def test_tokenizer_integration(self ):
"""simple docstring"""
UpperCAmelCase__ = {"attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "input_ids": [[2, 2_1_9_7_0, 1_3, 5, 6_0_9_2, 1_6_7, 2_8, 7_1_0_3, 2_1_5_3, 6_7_3, 8, 7_0_2_8, 1_2_0_5_1, 1_8, 1_7, 7_1_0_3, 2_1_5_3, 6_7_3, 8, 3_5_1_5, 1_8_6_8_4, 8, 4_4_6_1, 6, 1_9_2_7, 2_9_7, 8, 1_2_0_6_0, 2_6_0_7, 1_8, 1_3, 5, 4_4_6_1, 1_5, 1_0_5_3_8, 3_8, 8, 1_3_5, 1_5, 8_2_2, 5_8, 1_5, 9_9_3, 1_0_3_6_3, 1_5, 1_4_6_0, 8_0_0_5, 4_4_6_1, 1_5, 9_9_3, 2_5_5, 2_3_2_8, 9, 9, 9, 6, 2_6, 1_1_1_2, 8_1_6, 3_2_6_0, 1_3, 5, 1_0_3, 2_3_7_7, 6, 1_7, 1_1_1_2, 8_1_6, 2_7_8_2, 1_3, 5, 1_0_3, 1_0_6_4_1, 6, 2_9, 8_4, 2_5_1_2, 2_4_3_0, 7_8_2, 1_8_6_8_4, 2_7_6_1, 1_9, 8_0_8, 2_4_3_0, 2_5_5_6, 1_7, 8_5_5, 1_4_8_0, 9_4_7_7, 4_0_9_1, 1_2_8, 1_1_7_1_2, 1_5, 7_1_0_3, 2_1_5_3, 6_7_3, 1_7, 2_4_8_8_3, 9_9_9_0, 9, 3], [2, 1_1_5_0_2, 2_5, 1_0_0_6, 2_0, 7_8_2, 8, 1_1_8_0_9, 8_5_5, 1_7_3_2, 1_9_3_9_3, 1_8_6_6_7, 3_7, 3_6_7, 2_1_0_1_8, 6_9, 1_8_5_4, 3_4, 1_1_8_6_0, 1_9_1_2_4, 2_7, 1_5_6, 2_2_5, 1_7, 1_9_3, 4_1_4_1, 1_9, 6_5, 9_1_2_4, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 1_4, 2_2_3_1, 8_8_6, 2_3_8_5, 1_7_6_5_9, 8_4, 1_4, 1_6_7_9_2, 1_9_5_2, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
self.tokenizer_integration_test_util(
            expected_encoding=UpperCAmelCase__ , model_name="albert-base-v2" , revision="6b6560eaf5ff2e250b00c50f380c5389a9c2d82e" , )
| 65 |
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def unwrap_schedule ( scheduler , num_steps=10 ):
    lrs = []
    for _ in range(num_steps ):
        lrs.append(scheduler.get_lr()[0] )
        scheduler.step()
    return lrs


def unwrap_and_save_reload_schedule ( scheduler , num_steps=10 ):
    lrs = []
    for step in range(num_steps ):
        lrs.append(scheduler.get_lr()[0] )
        scheduler.step()
        if step == num_steps // 2:
            with tempfile.TemporaryDirectory() as tmpdirname:
                file_name = os.path.join(tmpdirname , "schedule.bin" )
                torch.save(scheduler.state_dict() , file_name )

                state_dict = torch.load(file_name )
                scheduler.load_state_dict(state_dict )
    return lrs
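# note: the helper above checkpoints the schedule halfway through stepping; since
# learning-rate schedulers expose state_dict()/load_state_dict(), a save/reload
# round trip must not change the remaining learning-rate trajectory, which the
# tests below verify by comparing against an uninterrupted run.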
@require_torch
class OptimizationTest ( unittest.TestCase ):
"""simple docstring"""
    def assertListAlmostEqual( self , list1 , list2 , tol):
        self.assertEqual(len(list1) , len(list2))
        for a, b in zip(list1 , list2):
            self.assertAlmostEqual(a , b , delta=tol)
    def test_adam_w( self ):
        w = torch.tensor([0.1, -0.2, -0.1] , requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = AdamW(params=[w] , lr=2E-1 , weight_decay=0.0)
        for _ in range(100):
            loss = criterion(w , target)
            loss.backward()
            optimizer.step()
            w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1E-2)
    def test_adafactor( self ):
        w = torch.tensor([0.1, -0.2, -0.1] , requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = Adafactor(
            params=[w] , lr=1E-2 , eps=(1E-30, 1E-3) , clip_threshold=1.0 , decay_rate=-0.8 , beta1=None , weight_decay=0.0 , relative_step=False , scale_parameter=False , warmup_init=False , )
        for _ in range(1000):
            loss = criterion(w , target)
            loss.backward()
            optimizer.step()
            w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1E-2)
@require_torch
class ScheduleInitTest ( unittest.TestCase ):
"""simple docstring"""
    m = nn.Linear(50 , 50 ) if is_torch_available() else None
    optimizer = AdamW(m.parameters() , lr=10.0 ) if is_torch_available() else None
    num_steps = 10
    def assertListAlmostEqual( self , list1 , list2 , tol , msg=None):
        self.assertEqual(len(list1) , len(list2))
        for a, b in zip(list1 , list2):
            self.assertAlmostEqual(a , b , delta=tol , msg=msg)
    def test_schedulers( self ):
        common_kwargs = {"num_warmup_steps": 2, "num_training_steps": 10}
        # scheduler dict format
        # function: (sched_args_dict, expected_learning_rates)
        scheds = {
get_constant_schedule: ({}, [10.0] * self.num_steps),
get_constant_schedule_with_warmup: (
{"num_warmup_steps": 4},
[0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
),
get_linear_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
),
get_cosine_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
),
get_cosine_with_hard_restarts_schedule_with_warmup: (
{**common_kwargs, "num_cycles": 2},
[0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
),
get_polynomial_decay_schedule_with_warmup: (
{**common_kwargs, "power": 2.0, "lr_end": 1E-7},
[0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
),
get_inverse_sqrt_schedule: (
{"num_warmup_steps": 2},
[0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
),
}
        for scheduler_func, data in scheds.items():
            kwargs, expected_learning_rates = data

            scheduler = scheduler_func(self.optimizer , **kwargs)
            self.assertEqual(len([scheduler.get_lr()[0]]) , 1)
            lrs_1 = unwrap_schedule(scheduler , self.num_steps)
            self.assertListAlmostEqual(
                lrs_1 , expected_learning_rates , tol=1E-2 , msg=F"failed for {scheduler_func} in normal scheduler" , )

            scheduler = scheduler_func(self.optimizer , **kwargs)
            if scheduler_func.__name__ != "get_constant_schedule":
                LambdaScheduleWrapper.wrap_scheduler(scheduler) # wrap to test picklability of the schedule
            lrs_2 = unwrap_and_save_reload_schedule(scheduler , self.num_steps)
            self.assertListEqual(lrs_1 , lrs_2 , msg=F"failed for {scheduler_func} in save and reload")
class LambdaScheduleWrapper:
    """simple docstring"""

    def __init__( self , fn):
        self.fn = fn

    def __call__( self , *args , **kwargs):
        return self.fn(*args , **kwargs)

    @classmethod
    def wrap_scheduler( cls , scheduler):
        scheduler.lr_lambdas = list(map(cls , scheduler.lr_lambdas))
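# note: LambdaLR-style schedules hold raw lambdas in `lr_lambdas`, which pickle
# cannot serialize; routing each one through this top-level callable class (the
# assumed purpose of wrap_scheduler above) keeps the schedule picklable for the
# save-and-reload test.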
| 13 | 0 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"""vocab_file""": """sentencepiece.model"""}

PRETRAINED_VOCAB_FILES_MAP = {
    """vocab_file""": {
        """google/rembert""": """https://huggingface.co/google/rembert/resolve/main/sentencepiece.model""",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    """google/rembert""": 2_5_6,
}


class RemBertTokenizer ( PreTrainedTokenizer ):
    '''simple docstring'''

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__( self , vocab_file , do_lower_case=False , remove_space=True , keep_accents=True , bos_token="[CLS]" , eos_token="[SEP]" , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , **kwargs , ):
        '''simple docstring'''
        super().__init__(
            do_lower_case=do_lower_case , remove_space=remove_space , keep_accents=keep_accents , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , **kwargs , )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(vocab_file )
    @property
    def vocab_size( self ):
        '''simple docstring'''
        return len(self.sp_model )

    def get_vocab( self ):
        '''simple docstring'''
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def __getstate__( self ):
        '''simple docstring'''
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__( self , d ):
        '''simple docstring'''
        self.__dict__ = d
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(self.vocab_file )
    def _tokenize( self , text , sample=False ):
        '''simple docstring'''
        pieces = self.sp_model.EncodeAsPieces(text )
        return pieces

    def _convert_token_to_id( self , token ):
        '''simple docstring'''
        return self.sp_model.PieceToId(token )

    def _convert_id_to_token( self , index ):
        '''simple docstring'''
        return self.sp_model.IdToPiece(index )

    def convert_tokens_to_string( self , tokens ):
        '''simple docstring'''
        out_string = self.sp_model.decode_pieces(tokens )
        return out_string
    def build_inputs_with_special_tokens( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None , already_has_special_tokens: bool = False ):
        '''simple docstring'''
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    '''You should not supply a second sequence if the provided sequence of '''
                    '''ids is already formatted with special tokens for the model.''' )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0 )) + [1] + ([0] * len(token_ids_1 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1]

    def create_token_type_ids_from_sequences( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
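        # worked example: for token_ids_0 of length 3 and token_ids_1 of length 2,
        # the segment ids are len([CLS] + A + [SEP]) * [0] + len(B + [SEP]) * [1]
        # = [0, 0, 0, 0, 0, 1, 1, 1].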
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ):
        '''simple docstring'''
        if not os.path.isdir(save_directory ):
            logger.error('''Vocabulary path ({}) should be a directory'''.format(save_directory ) )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )

        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )

        return (out_vocab_file,)
| 226 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True )
class AutomaticSpeechRecognition ( TaskTemplate ):
    """simple docstring"""

    task: str = field(default='''automatic-speech-recognition''' , metadata={'''include_in_asdict_even_if_is_default''': True} )
    input_schema: ClassVar[Features] = Features({'''audio''': Audio()} )
    label_schema: ClassVar[Features] = Features({'''transcription''': Value('''string''' )} )
    audio_column: str = "audio"
    transcription_column: str = "transcription"

    def align_with_features( self , features ):
        if self.audio_column not in features:
            raise ValueError(F"Column {self.audio_column} is not present in features.")
        if not isinstance(features[self.audio_column] , Audio):
            raise ValueError(F"Column {self.audio_column} is not an Audio type.")
        task_template = copy.deepcopy(self)
        input_schema = self.input_schema.copy()
        input_schema["audio"] = features[self.audio_column]
        task_template.__dict__["input_schema"] = input_schema
        return task_template

    @property
    def column_mapping( self ):
        return {self.audio_column: "audio", self.transcription_column: "transcription"}
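# usage sketch (assumed datasets API): ds.prepare_for_task("automatic-speech-recognition")
# looks up this template, calls align_with_features(ds.features) so the template's
# audio schema picks up the dataset's own Audio feature (e.g. its sampling_rate),
# and renames columns according to column_mapping.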
| 13 | 0 |
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import VideoMAEConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEModel,
)
from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class VideoMAEModelTester:
    """simple docstring"""

    def __init__( self, parent, batch_size=1_3, image_size=1_0, num_channels=3, patch_size=2, tubelet_size=2, num_frames=2, is_training=True, use_labels=True, hidden_size=3_2, num_hidden_layers=5, num_attention_heads=4, intermediate_size=3_7, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=1_0, initializer_range=0.0_2, mask_ratio=0.9, scope=None, ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.tubelet_size = tubelet_size
        self.num_frames = num_frames
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope

        # in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame
        self.num_patches_per_frame = (image_size // patch_size) ** 2
        self.seq_length = (num_frames // tubelet_size) * self.num_patches_per_frame

        # use this variable to define bool_masked_pos
        self.num_masks = int(mask_ratio * self.seq_length )
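        # worked example with the defaults above (image_size=10, patch_size=2,
        # num_frames=2, tubelet_size=2, mask_ratio=0.9):
        #   num_patches_per_frame = (10 // 2) ** 2 = 25
        #   seq_length            = (2 // 2) * 25  = 25
        #   num_masks             = int(0.9 * 25)  = 22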
    def prepare_config_and_inputs( self ):
        pixel_values = floats_tensor(
            [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size )

        config = self.get_config()

        return config, pixel_values, labels

    def get_config( self ):
        return VideoMAEConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, num_frames=self.num_frames, tubelet_size=self.tubelet_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, )

    def create_and_check_model( self, config, pixel_values, labels ):
        model = VideoMAEModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )

    def create_and_check_for_pretraining( self, config, pixel_values, labels ):
        model = VideoMAEForPreTraining(config )
        model.to(torch_device )
        model.eval()
        # important: each video needs to have the same number of masked patches
        # hence we define a single mask, which we then repeat for each example in the batch
        mask = torch.ones((self.num_masks,) )
        mask = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0 ) )] )
        bool_masked_pos = mask.expand(self.batch_size, -1 ).bool()
        result = model(pixel_values, bool_masked_pos )
        # model only returns predictions for masked patches
        num_masked_patches = mask.sum().item()
        decoder_num_labels = 3 * self.tubelet_size * self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_masked_patches, decoder_num_labels) )

    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class VideoMAEModelTest ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""

    all_model_classes = (
        (VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else ()
    )
    pipeline_model_mapping = (
        {'''feature-extraction''': VideoMAEModel, '''video-classification''': VideoMAEForVideoClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp( self ):
        self.model_tester = VideoMAEModelTester(self )
        self.config_tester = ConfigTester(self, config_class=VideoMAEConfig, has_text_modality=False, hidden_size=3_7 )
    def _prepare_for_class( self, inputs_dict, model_class, return_labels=False ):
        inputs_dict = copy.deepcopy(inputs_dict )

        if model_class == VideoMAEForPreTraining:
            # important: each video needs to have the same number of masked patches
            # hence we define a single mask, which we then repeat for each example in the batch
            mask = torch.ones((self.model_tester.num_masks,) )
            mask = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0 ) )] )
            bool_masked_pos = mask.expand(self.model_tester.batch_size, -1 ).bool()
            inputs_dict["bool_masked_pos"] = bool_masked_pos.to(torch_device )

        if return_labels:
            if model_class in [
                *get_values(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING ),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device )

        return inputs_dict
    def test_config( self ):
        self.config_tester.run_common_tests()

    @unittest.skip(reason='VideoMAE does not use inputs_embeds' )
    def test_inputs_embeds( self ):
        pass

    def test_model_common_attributes( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear ) )

    def test_forward_signature( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names )

    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )

    def test_for_pretraining( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs )

    @slow
    def test_model_from_pretrained( self ):
        for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = VideoMAEModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
    def test_attention_outputs( self ):
        if not self.has_attentions:
            pass
        else:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True

            for model_class in self.all_model_classes:
                num_visible_patches = self.model_tester.seq_length - self.model_tester.num_masks
                seq_len = (
                    num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
                )

                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = False
                config.return_dict = True
                model = model_class(config )
                model.to(torch_device )
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class ) )
                attentions = outputs.attentions
                self.assertEqual(len(attentions ), self.model_tester.num_hidden_layers )

                # check that output_attentions also work using config
                del inputs_dict["output_attentions"]
                config.output_attentions = True
                model = model_class(config )
                model.to(torch_device )
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class ) )
                attentions = outputs.attentions
                self.assertEqual(len(attentions ), self.model_tester.num_hidden_layers )

                self.assertListEqual(
                    list(attentions[0].shape[-3:] ), [self.model_tester.num_attention_heads, seq_len, seq_len], )
                out_len = len(outputs )

                # Check attention is always last and order is fine
                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = True
                model = model_class(config )
                model.to(torch_device )
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class ) )
                self.assertEqual(out_len + 1, len(outputs ) )

                self_attentions = outputs.attentions

                self.assertEqual(len(self_attentions ), self.model_tester.num_hidden_layers )
                self.assertListEqual(
                    list(self_attentions[0].shape[-3:] ), [self.model_tester.num_attention_heads, seq_len, seq_len], )
    def test_hidden_states_output( self ):
        def check_hidden_states_output(inputs_dict, config, model_class ):
            model = model_class(config )
            model.to(torch_device )
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class ) )

            hidden_states = outputs.hidden_states
            expected_num_layers = self.model_tester.num_hidden_layers + 1
            self.assertEqual(len(hidden_states ), expected_num_layers )

            num_visible_patches = self.model_tester.seq_length - self.model_tester.num_masks
            seq_length = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length

            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ), [seq_length, self.model_tester.hidden_size], )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class )

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class )
    @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
    def test_model_is_small( self ):
        pass
def prepare_video():
    file = hf_hub_download(
        repo_id='hf-internal-testing/spaghetti-video' , filename='eating_spaghetti.npy' , repo_type='dataset' )
    video = np.load(file )
    return list(video )
@require_torch
@require_vision
class VideoMAEModelIntegrationTest ( unittest.TestCase ):
    """simple docstring"""

    @cached_property
    def default_image_processor( self ):
# logits were tested with a different mean and std, so we use the same here
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
    def test_inference_for_video_classification( self ):
        model = VideoMAEForVideoClassification.from_pretrained('MCG-NJU/videomae-base-finetuned-kinetics' ).to(
            torch_device )

        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video, return_tensors='pt' ).to(torch_device )

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )

        # verify the logits
        expected_shape = torch.Size((1, 4_0_0) )
        self.assertEqual(outputs.logits.shape, expected_shape )

        expected_slice = torch.tensor([0.3_6_6_9, -0.0_6_8_8, -0.2_4_2_1] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4 ) )
    @slow
    def test_inference_for_pretraining( self ):
        model = VideoMAEForPreTraining.from_pretrained('MCG-NJU/videomae-base-short' ).to(torch_device )

        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video, return_tensors='pt' ).to(torch_device )

        # add boolean mask, indicating which patches to mask
        local_path = hf_hub_download(repo_id='hf-internal-testing/bool-masked-pos', filename='bool_masked_pos.pt' )
        inputs["bool_masked_pos"] = torch.load(local_path )

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )

        # verify the logits
        expected_shape = torch.Size([1, 1_4_0_8, 1_5_3_6] )
        expected_slice = torch.tensor(
            [[0.7_9_9_4, 0.9_6_1_2, 0.8_5_0_8], [0.7_4_0_1, 0.8_9_5_8, 0.8_3_0_2], [0.5_8_6_2, 0.7_4_6_8, 0.7_3_2_5]], device=torch_device )
        self.assertEqual(outputs.logits.shape, expected_shape )
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice, atol=1e-4 ) )

        # verify the loss (`config.norm_pix_loss` = `True`)
        expected_loss = torch.tensor([0.5_1_4_2], device=torch_device )
        self.assertTrue(torch.allclose(outputs.loss, expected_loss, atol=1e-4 ) )

        # verify the loss (`config.norm_pix_loss` = `False`)
        model = VideoMAEForPreTraining.from_pretrained('MCG-NJU/videomae-base-short', norm_pix_loss=False ).to(
            torch_device )

        with torch.no_grad():
            outputs = model(**inputs )

        expected_loss = torch.tensor(torch.tensor([0.6_4_6_9] ), device=torch_device )
        self.assertTrue(torch.allclose(outputs.loss, expected_loss, atol=1e-4 ) )
| 277 |
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class LogitsProcessorTest ( unittest.TestCase ):
    """simple docstring"""
    def _get_uniform_logits( self , batch_size , length):
        scores = jnp.ones((batch_size, length)) / length
        return scores
    def test_temperature_dist_warper( self ):
        input_ids = None
        length = 20
        scores = self._get_uniform_logits(batch_size=2 , length=length)
        # tweak scores to not be uniform anymore
        scores = scores.at[1, 5].set((1 / length) + 0.1) # peak, 1st batch
        scores = scores.at[1, 10].set((1 / length) - 0.4) # valley, 1st batch
        # compute softmax
        probs = jax.nn.softmax(scores , axis=-1)
        temp_dist_warper_sharper = FlaxTemperatureLogitsWarper(temperature=0.5)
        temp_dist_warper_smoother = FlaxTemperatureLogitsWarper(temperature=1.3)
        warped_prob_sharp = jax.nn.softmax(temp_dist_warper_sharper(input_ids , scores.copy() , cur_len=None) , axis=-1)
        warped_prob_smooth = jax.nn.softmax(temp_dist_warper_smoother(input_ids , scores.copy() , cur_len=None) , axis=-1)
        # uniform distribution stays uniform
        self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_sharp[0, :] , atol=1E-3))
        self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_smooth[0, :] , atol=1E-3))
        # sharp peaks get higher, valleys get lower
        self.assertLess(probs[1, :].max() , warped_prob_sharp[1, :].max())
        self.assertGreater(probs[1, :].min() , warped_prob_sharp[1, :].min())
        # smooth peaks get lower, valleys get higher
        self.assertGreater(probs[1, :].max() , warped_prob_smooth[1, :].max())
        self.assertLess(probs[1, :].min() , warped_prob_smooth[1, :].min())
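        # note: a temperature T rescales logits before softmax (p_i ∝ exp(z_i / T)),
        # so T < 1 sharpens the distribution and T > 1 flattens it, which is what
        # the peak/valley checks above verify.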
    def test_top_k_dist_warper( self ):
        input_ids = None
        vocab_size = 10
        batch_size = 2
        # create ramp distribution
        ramp_logits = np.broadcast_to(np.arange(vocab_size)[None, :] , (batch_size, vocab_size)).copy()
        ramp_logits[1:, : vocab_size // 2] = ramp_logits[1:, : vocab_size // 2] + vocab_size
        top_k_warp = FlaxTopKLogitsWarper(3)
        scores = top_k_warp(input_ids , ramp_logits , cur_len=None)
        # check that correct tokens are filtered
        self.assertListEqual(jnp.isinf(scores[0]).tolist() , 7 * [True] + 3 * [False])
        self.assertListEqual(jnp.isinf(scores[1]).tolist() , 2 * [True] + 3 * [False] + 5 * [True])
        # check special case
        length = 5
        top_k_warp_safety_check = FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3)
        ramp_logits = np.broadcast_to(np.arange(length)[None, :] , (batch_size, length)).copy()
        scores = top_k_warp_safety_check(input_ids , ramp_logits , cur_len=None)
        # min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
        self.assertListEqual((scores == 0.0).sum(axis=-1).tolist() , [2, 2])
    def test_top_p_dist_warper( self ):
        input_ids = None
        vocab_size = 10
        batch_size = 2
        # create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
        dist = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]]))
        top_p_warp = FlaxTopPLogitsWarper(0.8)
        filtered_dist = np.exp(top_p_warp(input_ids , dist , cur_len=None))
        # dist should be filtered to keep min num values so that sum is >= top_p
        # exp (-inf) => 0
        EXPECTED_FILTERED_DIST = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]])
        self.assertTrue(np.allclose(filtered_dist , EXPECTED_FILTERED_DIST , atol=1E-3))
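        # worked example for batch 0: sorted probs are [0.5, 0.3, 0.1, 0.1]; the
        # smallest prefix with cumulative mass >= top_p=0.8 is {0.5, 0.3}, so the
        # two 0.1 entries are filtered to -inf (0.0 after exp).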
        # check edge cases with negative and extreme logits
        ramp_logits = np.broadcast_to(np.arange(vocab_size)[None, :] , (batch_size, vocab_size)).copy() - (
            vocab_size // 2
        )
        # make ramp_logits more extreme
        ramp_logits[1] = ramp_logits[1] * 100.0
        # make sure at least 2 tokens are kept
        top_p_warp = FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0)
        filtered_dist = top_p_warp(input_ids , ramp_logits , cur_len=None)
        # first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
        self.assertListEqual((filtered_dist != 0.0).sum(axis=-1).tolist() , [3, 2])
    def test_min_length_dist_processor( self ):
        vocab_size = 20
        batch_size = 4
        eos_token_id = 0
        min_dist_processor = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=eos_token_id)
        # check that min length is applied at length 5
        input_ids = ids_tensor((batch_size, 20) , vocab_size=20)
        cur_len = 5
        scores = self._get_uniform_logits(batch_size , vocab_size)
        scores_before_min_length = min_dist_processor(input_ids , scores , cur_len=cur_len)
        self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float("inf")])
        # check that min length is not applied anymore at length 15
        scores = self._get_uniform_logits(batch_size , vocab_size)
        cur_len = 15
        scores_before_min_length = min_dist_processor(input_ids , scores , cur_len=cur_len)
        self.assertFalse(jnp.isinf(scores_before_min_length).any())
    def test_forced_bos_token_logits_processor( self ):
        vocab_size = 20
        batch_size = 4
        bos_token_id = 0
        logits_processor = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)
        # check that all scores are -inf except the bos_token_id score
        input_ids = ids_tensor((batch_size, 1) , vocab_size=20)
        cur_len = 1
        scores = self._get_uniform_logits(batch_size , vocab_size)
        scores = logits_processor(input_ids , scores , cur_len=cur_len)
        self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :]).all())
        self.assertListEqual(scores[:, bos_token_id].tolist() , 4 * [0]) # score for bos_token_id should be zero
        # check that bos_token_id is not forced if current length is greater than 1
        cur_len = 3
        scores = self._get_uniform_logits(batch_size , vocab_size)
        scores = logits_processor(input_ids , scores , cur_len=cur_len)
        self.assertFalse(jnp.isinf(scores).any())
    def test_forced_eos_token_logits_processor( self ):
        vocab_size = 20
        batch_size = 4
        eos_token_id = 0
        max_length = 5
        logits_processor = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length , eos_token_id=eos_token_id)
        # check that all scores are -inf except the eos_token_id when max_length is reached
        input_ids = ids_tensor((batch_size, 4) , vocab_size=20)
        cur_len = 4
        scores = self._get_uniform_logits(batch_size , vocab_size)
        scores = logits_processor(input_ids , scores , cur_len=cur_len)
        self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :]).all())
        self.assertListEqual(scores[:, eos_token_id].tolist() , 4 * [0]) # score for eos_token_id should be zero
        # check that eos_token_id is not forced if max_length is not reached
        cur_len = 3
        scores = self._get_uniform_logits(batch_size , vocab_size)
        scores = logits_processor(input_ids , scores , cur_len=cur_len)
        self.assertFalse(jnp.isinf(scores).any())
    def test_processor_list( self ):
        batch_size = 4
        sequence_length = 10
        vocab_size = 15
        eos_token_id = 2
        bos_token_id = 1
        max_length = 15
        # dummy input_ids and scores
        input_ids = ids_tensor((batch_size, sequence_length) , vocab_size)
        input_ids_comp = input_ids.copy()
        scores = self._get_uniform_logits(batch_size , vocab_size)
        scores_comp = scores.copy()
        # instantiate all dist processors
        temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5)
        top_k_warp = FlaxTopKLogitsWarper(3)
        top_p_warp = FlaxTopPLogitsWarper(0.8)
        # instantiate all logits processors
        min_dist_proc = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=eos_token_id)
        bos_dist_proc = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)
        eos_dist_proc = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length , eos_token_id=eos_token_id)
        cur_len = 10
        # no processor list
        scores = temp_dist_warp(input_ids , scores , cur_len=cur_len)
        scores = top_k_warp(input_ids , scores , cur_len=cur_len)
        scores = top_p_warp(input_ids , scores , cur_len=cur_len)
        scores = min_dist_proc(input_ids , scores , cur_len=cur_len)
        scores = bos_dist_proc(input_ids , scores , cur_len=cur_len)
        scores = eos_dist_proc(input_ids , scores , cur_len=cur_len)
        # with processor list
        processor = FlaxLogitsProcessorList(
            [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc])
        scores_comp = processor(input_ids , scores_comp , cur_len=cur_len)
        # scores should be equal
        self.assertTrue(jnp.allclose(scores , scores_comp , atol=1E-3))
        # input_ids should never be changed
        self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist())
    def test_processor_list_jitted( self ):
        batch_size = 4
        sequence_length = 10
        vocab_size = 15
        eos_token_id = 2
        bos_token_id = 1
        max_length = 15
        # dummy input_ids and scores
        input_ids = ids_tensor((batch_size, sequence_length) , vocab_size)
        input_ids_comp = input_ids.copy()
        scores = self._get_uniform_logits(batch_size , vocab_size)
        scores_comp = scores.copy()
        # instantiate all dist processors
        temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5)
        top_k_warp = FlaxTopKLogitsWarper(3)
        top_p_warp = FlaxTopPLogitsWarper(0.8)
        # instantiate all logits processors
        min_dist_proc = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=eos_token_id)
        bos_dist_proc = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)
        eos_dist_proc = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length , eos_token_id=eos_token_id)
        cur_len = 10
        # no processor list
        def run_no_processor_list(input_ids , scores , cur_len):
            scores = temp_dist_warp(input_ids , scores , cur_len=cur_len)
            scores = top_k_warp(input_ids , scores , cur_len=cur_len)
            scores = top_p_warp(input_ids , scores , cur_len=cur_len)
            scores = min_dist_proc(input_ids , scores , cur_len=cur_len)
            scores = bos_dist_proc(input_ids , scores , cur_len=cur_len)
            scores = eos_dist_proc(input_ids , scores , cur_len=cur_len)
            return scores
        # with processor list
        def run_processor_list(input_ids , scores , cur_len):
            processor = FlaxLogitsProcessorList(
                [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc])
            scores = processor(input_ids , scores , cur_len=cur_len)
            return scores
        jitted_run_no_processor_list = jax.jit(run_no_processor_list)
        jitted_run_processor_list = jax.jit(run_processor_list)
        scores = jitted_run_no_processor_list(input_ids , scores , cur_len)
        scores_comp = jitted_run_processor_list(input_ids , scores_comp , cur_len)
        # scores should be equal
        self.assertTrue(jnp.allclose(scores , scores_comp , atol=1E-3))
        # input_ids should never be changed
        self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist())
| 13 | 0 |
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
precision = 10


def lin_search(left: int, right: int, array: list[int], target: int) -> int:
    for i in range(left , right ):
        if array[i] == target:
            return i
    return -1


def ite_ternary_search(array: list[int], target: int) -> int:
    left = 0
    right = len(array )
    while left <= right:
        if right - left < precision:
            return lin_search(left , right , array , target )

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third

        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1

        else:
            left = one_third + 1
            right = two_third - 1
    else:
        return -1


def rec_ternary_search(left: int, right: int, array: list[int], target: int) -> int:
    if left < right:
        if right - left < precision:
            return lin_search(left , right , array , target )

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third

        elif target < array[one_third]:
            return rec_ternary_search(left , one_third - 1 , array , target )
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1 , right , array , target )
        else:
            return rec_ternary_search(one_third + 1 , two_third - 1 , array , target )
    else:
        return -1
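# note: each ternary step keeps roughly one third of the remaining range, so both
# variants run in O(log n) steps (logarithm base 3); once the range drops below
# `precision`, they fall back to the linear scan in lin_search.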
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("""Enter numbers separated by comma:\n""").strip()
    collection = [int(item.strip()) for item in user_input.split(""",""")]
    assert collection == sorted(collection), F"List must be ordered.\n{collection}."
    target = int(input("""Enter the number to be found in the list:\n""").strip())
    result1 = ite_ternary_search(collection, target)
    result2 = rec_ternary_search(0, len(collection) - 1, collection, target)
    if result2 != -1:
        print(F'''Iterative search: {target} found at positions: {result1}''')
        print(F'''Recursive search: {target} found at positions: {result2}''')
    else:
        print("""Not found""")
| 20 |
import math
import sys
def A_ ( number ):
    if number != int(number ):
        raise ValueError("the value of input must be a natural number" )
    if number < 0:
        raise ValueError("the value of input must not be a negative number" )
    if number == 0:
        return 1
    answers = [-1] * (number + 1)
    answers[0] = 0
    for i in range(1 , number + 1 ):
        answer = sys.maxsize
        root = int(math.sqrt(i ) )
        for j in range(1 , root + 1 ):
            current_answer = 1 + answers[i - (j**2)]
            answer = min(answer , current_answer )
        answers[i] = answer
    return answers[number]
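# worked examples (by Lagrange's four-square theorem the result is always <= 4):
#   A_(12) == 3   (12 = 4 + 4 + 4)
#   A_(13) == 2   (13 = 4 + 9)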
if __name__ == "__main__":
import doctest
doctest.testmod()
| 13 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""google/vivit-b-16x2-kinetics400""": (
"""https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json"""
),
# See all Vivit models at https://huggingface.co/models?filter=vivit
}
class VivitConfig ( PretrainedConfig ):
    model_type = '''vivit'''

    def __init__( self , image_size=224 , num_frames=32 , tubelet_size=[2, 16, 16] , num_channels=3 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu_fast" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1e-06 , qkv_bias=True , **kwargs , ) -> None:
        '''simple docstring'''
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.image_size = image_size
        self.num_frames = num_frames
        self.tubelet_size = tubelet_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        super().__init__(**kwargs )
| 348 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"""configuration_wavlm""": ["""WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """WavLMConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_wavlm"""] = [
"""WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""WavLMForAudioFrameClassification""",
"""WavLMForCTC""",
"""WavLMForSequenceClassification""",
"""WavLMForXVector""",
"""WavLMModel""",
"""WavLMPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavlm import (
WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
WavLMForAudioFrameClassification,
WavLMForCTC,
WavLMForSequenceClassification,
WavLMForXVector,
WavLMModel,
WavLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
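# note: this lazy-import pattern replaces the module object in sys.modules with a
# _LazyModule proxy, so the torch-backed classes listed above are only imported the
# first time an attribute is actually accessed.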
| 13 | 0 |
"""simple docstring"""
from typing import List
import jiwer
import jiwer.transforms as tr
from packaging import version
import datasets
from datasets.config import PY_VERSION
if PY_VERSION < version.parse("3.8"):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
lowerCamelCase_ = """"""
if version.parse(importlib_metadata.version("jiwer")) < version.parse("2.3.0"):
    class SentencesToListOfCharacters(tr.AbstractTransform ):
        def __init__( self , sentence_delimiter=" " ):
            self.sentence_delimiter = sentence_delimiter

        def process_string( self , s ):
            return list(s )

        def process_list( self , inp ):
            chars = []
            for sent_idx, sentence in enumerate(inp ):
                chars.extend(self.process_string(sentence ) )
                if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(inp ) - 1:
                    chars.append(self.sentence_delimiter )
            return chars
    cer_transform = tr.Compose(
[tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
)
else:
    cer_transform = tr.Compose(
[
tr.RemoveMultipleSpaces(),
tr.Strip(),
tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
tr.ReduceToListOfListOfChars(),
]
)
lowerCamelCase_ = """\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
"""
lowerCamelCase_ = """\
Character error rate (CER) is a common metric of the performance of an automatic speech recognition system.
CER is similar to Word Error Rate (WER), but operates on character instead of word. Please refer to docs of WER for further information.
Character error rate can be computed as:
CER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct characters,
N is the number of characters in the reference (N=S+D+C).
CER's output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated to the percentage of characters that were incorrectly predicted. The lower the value, the better the
performance of the ASR system with a CER of 0 being a perfect score.
"""
_KWARGS_DESCRIPTION = """
Computes CER score of transcribed segments against references.
Args:
references: list of references for each speech input.
predictions: list of transcribtions to score.
concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for more accurate result.
Returns:
(float): the character error rate
Examples:
>>> predictions = [\"this is the prediction\", \"there is an other sample\"]
>>> references = [\"this is the reference\", \"there is another one\"]
>>> cer = datasets.load_metric(\"cer\")
>>> cer_score = cer.compute(predictions=predictions, references=references)
>>> print(cer_score)
0.34146341463414637
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class CER(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' ,id='''sequence''' ),
'''references''': datasets.Value('''string''' ,id='''sequence''' ),
} ) ,codebase_urls=['''https://github.com/jitsi/jiwer/'''] ,reference_urls=[
'''https://en.wikipedia.org/wiki/Word_error_rate''',
'''https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates''',
] ,)
    def _compute(self, predictions, references, concatenate_texts=False):
        if concatenate_texts:
            return jiwer.compute_measures(
                references, predictions, truth_transform=cer_transform, hypothesis_transform=cer_transform
            )["wer"]

        incorrect = 0
        total = 0
        for prediction, reference in zip(predictions, references):
            measures = jiwer.compute_measures(
                reference, prediction, truth_transform=cer_transform, hypothesis_transform=cer_transform
            )
incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
total += measures["substitutions"] + measures["deletions"] + measures["hits"]
return incorrect / total | 191 |
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TextaTextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class TextaTextGenerationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        generator = TextaTextGenerationPipeline(model=model, tokenizer=tokenizer)
        return generator, ["Something to write", "Something else"]

    def run_pipeline_test(self, generator, _):
        outputs = generator("Something there")
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        # These are encoder decoder, they don't just append to incoming string
        self.assertFalse(outputs[0]["generated_text"].startswith("Something there"))

        outputs = generator(["This is great !", "Something else"], num_return_sequences=2, do_sample=True)
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )

        outputs = generator(
            ["This is great !", "Something else"], num_return_sequences=2, batch_size=2, do_sample=True
        )
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )

        with self.assertRaises(ValueError):
            generator(4)

    @require_torch
    def test_small_model_pt(self):
        generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random", framework="pt")
        # do_sample=False necessary for reproducibility
        outputs = generator("Something there", do_sample=False)
        self.assertEqual(outputs, [{"generated_text": ""}])

        num_return_sequences = 3
        outputs = generator(
            "Something there",
            num_return_sequences=num_return_sequences,
            num_beams=num_return_sequences,
        )
        target_outputs = [
            {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide Beide"},
            {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide"},
            {"generated_text": ""},
        ]
        self.assertEqual(outputs, target_outputs)

        outputs = generator("This is a test", do_sample=True, num_return_sequences=2, return_tensors=True)
        self.assertEqual(
            outputs,
            [
                {"generated_token_ids": ANY(torch.Tensor)},
                {"generated_token_ids": ANY(torch.Tensor)},
            ],
        )

        generator.tokenizer.pad_token_id = generator.model.config.eos_token_id
        generator.tokenizer.pad_token = "<pad>"
        outputs = generator(
            ["This is a test", "This is a second test"],
            do_sample=True,
            num_return_sequences=2,
            batch_size=2,
            return_tensors=True,
        )
        self.assertEqual(
            outputs,
            [
                [
                    {"generated_token_ids": ANY(torch.Tensor)},
                    {"generated_token_ids": ANY(torch.Tensor)},
                ],
                [
                    {"generated_token_ids": ANY(torch.Tensor)},
                    {"generated_token_ids": ANY(torch.Tensor)},
                ],
            ],
        )

    @require_tf
    def test_small_model_tf(self):
        generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random", framework="tf")
        # do_sample=False necessary for reproducibility
        outputs = generator("Something there", do_sample=False)
        self.assertEqual(outputs, [{"generated_text": ""}])
| 13 | 0 |
import os
import posixpath
import shutil
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
logger = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class SparkConfig(datasets.BuilderConfig):
    """BuilderConfig for Spark."""

    features: Optional[datasets.Features] = None
def _generate_iterable_examples(
    df: "pyspark.sql.DataFrame",
    partition_order: List[int],
):
    import pyspark

    def generate_fn():
        df_with_partition_id = df.select("*", pyspark.sql.functions.spark_partition_id().alias("part_id"))
        for partition_id in partition_order:
            partition_df = df_with_partition_id.select("*").where(f"part_id = {partition_id}").drop("part_id")
            rows = partition_df.collect()
            row_id = 0
            for row in rows:
                yield f"{partition_id}_{row_id}", row.asDict()
                row_id += 1

    return generate_fn
class SparkExamplesIterable(_BaseExamplesIterable):
    def __init__(
        self,
        df: "pyspark.sql.DataFrame",
        partition_order=None,
    ):
        self.df = df
        self.partition_order = partition_order or range(self.df.rdd.getNumPartitions())
        self.generate_examples_fn = _generate_iterable_examples(self.df, self.partition_order)

    def __iter__(self):
        yield from self.generate_examples_fn()

    def shuffle_data_sources(self, generator: np.random.Generator) -> "SparkExamplesIterable":
        partition_order = list(range(self.df.rdd.getNumPartitions()))
        generator.shuffle(partition_order)
        return SparkExamplesIterable(self.df, partition_order=partition_order)

    def shard_data_sources(self, worker_id: int, num_workers: int) -> "SparkExamplesIterable":
        partition_order = self.split_shard_indices_by_worker(worker_id, num_workers)
        return SparkExamplesIterable(self.df, partition_order=partition_order)

    @property
    def n_shards(self) -> int:
        return len(self.partition_order)
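# Note on the sharding semantics above: each Spark partition becomes one shard of the
# iterable dataset, so n_shards == df.rdd.getNumPartitions(); shuffling permutes the
# order in which whole partitions are read, not the rows within a partition.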
class Spark(datasets.DatasetBuilder):
    BUILDER_CONFIG_CLASS = SparkConfig

    def __init__(self, df: "pyspark.sql.DataFrame", cache_dir: str = None, working_dir: str = None, **config_kwargs):
        import pyspark

        self._spark = pyspark.sql.SparkSession.builder.getOrCreate()
        self.df = df
        self._working_dir = working_dir
        super().__init__(cache_dir=cache_dir, config_name=str(self.df.semanticHash()), **config_kwargs)
    def _validate_cache_dir(self):
        # Returns the path of the created file.
        def create_cache_and_write_probe(context):
            # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
            # already exist.
            os.makedirs(self._cache_dir, exist_ok=True)
            probe_file = os.path.join(self._cache_dir, "fs_test" + uuid.uuid4().hex)
            # Opening the file in append mode will create a new file unless it already exists, in which case it will
            # not change the file contents.
            open(probe_file, "a")
            return [probe_file]

        if self._spark.conf.get("spark.master", "").startswith("local"):
            return

        # If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
        # accessible to the driver.
        # TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
        if self._cache_dir:
            probe = (
                self._spark.sparkContext.parallelize(range(1), 1).mapPartitions(create_cache_and_write_probe).collect()
            )
if os.path.isfile(probe[0] ):
return
raise ValueError(
"""When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir""" )
    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)
    def _split_generators(self, dl_manager):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN)]
    def _repartition_df_if_needed(self, max_shard_size):
        import pyspark

        def get_arrow_batch_size(it):
            for batch in it:
                yield pa.RecordBatch.from_pydict({"batch_bytes": [batch.nbytes]})

        df_num_rows = self.df.count()
        sample_num_rows = df_num_rows if df_num_rows <= 100 else 100
        # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
        approx_bytes_per_row = (
            self.df.limit(sample_num_rows)
            .repartition(1)
            .mapInArrow(get_arrow_batch_size, "batch_bytes: long")
            .agg(pyspark.sql.functions.sum("batch_bytes").alias("sample_bytes"))
            .collect()[0]
            .sample_bytes
            / sample_num_rows
        )
        approx_total_size = approx_bytes_per_row * df_num_rows
        if approx_total_size > max_shard_size:
            # Make sure there is at least one row per partition.
            new_num_partitions = min(df_num_rows, int(approx_total_size / max_shard_size))
            self.df = self.df.repartition(new_num_partitions)
    def _prepare_split_single(
        self,
        fpath: str,
        file_format: str,
        max_shard_size: int,
    ) -> Iterable[Tuple[int, Tuple[int, int, int, List[int]]]]:
        import pyspark

        writer_class = ParquetWriter if file_format == "parquet" else ArrowWriter
        working_fpath = os.path.join(self._working_dir, os.path.basename(fpath)) if self._working_dir else fpath
        embed_local_files = file_format == "parquet"

        # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
        # pickling the SparkContext.
        features = self.config.features
        writer_batch_size = self._writer_batch_size
        storage_options = self._fs.storage_options

        def write_arrow(it):
            # Within the same SparkContext, no two task attempts will share the same attempt ID.
            task_id = pyspark.TaskContext().taskAttemptId()
            first_batch = next(it, None)
            if first_batch is None:
                # Some partitions might not receive any data.
                return pa.RecordBatch.from_arrays(
                    [[task_id], [0], [0]],
                    names=["task_id", "num_examples", "num_bytes"],
                )
            shard_id = 0
            writer = writer_class(
                features=features,
                path=working_fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                writer_batch_size=writer_batch_size,
                storage_options=storage_options,
                embed_local_files=embed_local_files,
            )
            table = pa.Table.from_batches([first_batch])
            writer.write_table(table)
            for batch in it:
                if max_shard_size is not None and writer._num_bytes >= max_shard_size:
                    num_examples, num_bytes = writer.finalize()
                    writer.close()
                    yield pa.RecordBatch.from_arrays(
                        [[task_id], [num_examples], [num_bytes]],
                        names=["task_id", "num_examples", "num_bytes"],
                    )
                    shard_id += 1
                    writer = writer_class(
                        features=writer._features,
                        path=working_fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                        writer_batch_size=writer_batch_size,
                        storage_options=storage_options,
                        embed_local_files=embed_local_files,
                    )
                table = pa.Table.from_batches([batch])
                writer.write_table(table)
            if writer._num_bytes > 0:
                num_examples, num_bytes = writer.finalize()
                writer.close()
                yield pa.RecordBatch.from_arrays(
                    [[task_id], [num_examples], [num_bytes]],
                    names=["task_id", "num_examples", "num_bytes"],
                )
            if working_fpath != fpath:
                for file in os.listdir(os.path.dirname(working_fpath)):
                    dest = os.path.join(os.path.dirname(fpath), os.path.basename(file))
                    shutil.move(file, dest)

        stats = (
            self.df.mapInArrow(write_arrow, "task_id: long, num_examples: long, num_bytes: long")
            .groupBy("task_id")
            .agg(
                pyspark.sql.functions.sum("num_examples").alias("total_num_examples"),
                pyspark.sql.functions.sum("num_bytes").alias("total_num_bytes"),
                pyspark.sql.functions.count("num_bytes").alias("num_shards"),
                pyspark.sql.functions.collect_list("num_examples").alias("shard_lengths"),
            )
            .collect()
        )
        for row in stats:
            yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
    def _prepare_split(
        self,
        split_generator: "datasets.SplitGenerator",
        file_format: str = "arrow",
        max_shard_size: Optional[Union[str, int]] = None,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self._validate_cache_dir()

        max_shard_size = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE)
        self._repartition_df_if_needed(max_shard_size)
        is_local = not is_remote_filesystem(self._fs)
        path_join = os.path.join if is_local else posixpath.join

        SUFFIX = "-TTTTT-SSSSS-of-NNNNN"
        fname = f"{self.name}-{split_generator.name}{SUFFIX}.{file_format}"
        fpath = path_join(self._output_dir, fname)

        total_num_examples = 0
        total_num_bytes = 0
        total_shards = 0
        task_id_and_num_shards = []
        all_shard_lengths = []

        for task_id, content in self._prepare_split_single(fpath, file_format, max_shard_size):
            (
                num_examples,
                num_bytes,
                num_shards,
                shard_lengths,
            ) = content
            if num_bytes > 0:
                total_num_examples += num_examples
                total_num_bytes += num_bytes
                total_shards += num_shards
                task_id_and_num_shards.append((task_id, num_shards))
                all_shard_lengths.extend(shard_lengths)

        split_generator.split_info.num_examples = total_num_examples
        split_generator.split_info.num_bytes = total_num_bytes

        # should rename everything at the end
        logger.debug(f"Renaming {total_shards} shards.")
        if total_shards > 1:
            split_generator.split_info.shard_lengths = all_shard_lengths

            # Define fs outside of _rename_shard so that we don't reference self in the function, which will result
            # in a pickling error due to pickling the SparkContext.
            fs = self._fs

            # use the -SSSSS-of-NNNNN pattern
            def _rename_shard(
                task_id: int,
                shard_id: int,
                global_shard_id: int,
            ):
                rename(
                    fs,
                    fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                    fpath.replace("TTTTT-SSSSS", f"{global_shard_id:05d}").replace("NNNNN", f"{total_shards:05d}"),
                )

            args = []
            global_shard_id = 0
            for i in range(len(task_id_and_num_shards)):
                task_id, num_shards = task_id_and_num_shards[i]
                for shard_id in range(num_shards):
                    args.append([task_id, shard_id, global_shard_id])
                    global_shard_id += 1
            self._spark.sparkContext.parallelize(args, len(args)).map(lambda args: _rename_shard(*args)).collect()
        else:
            # don't use any pattern
            shard_id = 0
            task_id = task_id_and_num_shards[0][0]
            self._rename(
                fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                fpath.replace(SUFFIX, ""),
            )
    def _get_examples_iterable_for_split(
        self,
        split_generator: "datasets.SplitGenerator",
    ) -> SparkExamplesIterable:
        return SparkExamplesIterable(self.df)
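# A minimal usage sketch for the builder above (illustrative only; assumes a running
# SparkSession and the `datasets.Dataset.from_spark` entry point that wraps this builder):
#
#   import pyspark
#   from datasets import Dataset
#
#   spark = pyspark.sql.SparkSession.builder.getOrCreate()
#   df = spark.createDataFrame([("hello",), ("world",)], "text: string")
#   ds = Dataset.from_spark(df)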
| 26 |
def topological_sort(graph):
    indegree = [0] * len(graph)
    queue = []
    topo = []
    cnt = 0

    # Count incoming edges for every vertex.
    for values in graph.values():
        for i in values:
            indegree[i] += 1

    # Start from all vertices with no incoming edges.
    for i in range(len(graph)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        cnt += 1
        topo.append(vertex)
        for x in graph[vertex]:
            indegree[x] -= 1
            if indegree[x] == 0:
                queue.append(x)

    if cnt != len(graph):
        print("Cycle exists")
    else:
        print(topo)
# Adjacency List of Graph
lowerCAmelCase : Any = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
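# For the adjacency list above, the code prints [0, 1, 2, 3, 4, 5]: vertex 0 is the only
# zero-indegree start, and each pop releases its successors in FIFO (Kahn's algorithm) order.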
| 13 | 0 |
import os
import jsonlines
import numpy as np
from tqdm import tqdm
DOC_STRIDE = 2048
MAX_LENGTH = 4096
SEED = 42
PROCESS_TRAIN = os.environ.pop("PROCESS_TRAIN", "false")
CATEGORY_MAPPING = {"null": 0, "short": 1, "long": 2, "yes": 3, "no": 4}
def _get_single_answer(example):
    def choose_first(answer, is_long_answer=False):
        assert isinstance(answer, list)
        if len(answer) == 1:
            answer = answer[0]
            return {k: [answer[k]] for k in answer} if is_long_answer else answer
        for a in answer:
            if is_long_answer:
                a = {k: [a[k]] for k in a}
            if len(a["start_token"]) > 0:
                break
        return a

    answer = {"id": example["id"]}
    annotation = example["annotations"]
    yes_no_answer = annotation["yes_no_answer"]
    if 0 in yes_no_answer or 1 in yes_no_answer:
        answer["category"] = ["yes"] if 1 in yes_no_answer else ["no"]
        answer["start_token"] = answer["end_token"] = []
        answer["start_byte"] = answer["end_byte"] = []
        answer["text"] = ["<cls>"]
    else:
        answer["category"] = ["short"]
        out = choose_first(annotation["short_answers"])
        if len(out["start_token"]) == 0:
            # answer will be long if short is not available
            answer["category"] = ["long"]
            out = choose_first(annotation["long_answer"], is_long_answer=True)
            out["text"] = []
        answer.update(out)

    # disregard some samples
    if len(answer["start_token"]) > 1 or answer["start_token"] == answer["end_token"]:
        answer["remove_it"] = True
    else:
        answer["remove_it"] = False

    cols = ["start_token", "end_token", "start_byte", "end_byte", "text"]
    if not all(isinstance(answer[k], list) for k in cols):
        raise ValueError("Issue in ID", example["id"])

    return answer
def get_context_and_ans(example, assertion=False):
    """Gives new context after removing <html> tokens & the new answer span relative to it."""
    answer = _get_single_answer(example)
    # bytes are of no use
    del answer["start_byte"]
    del answer["end_byte"]

    # handle yes_no answers explicitly
    if answer["category"][0] in ["yes", "no"]:  # category is list with one element
        doc = example["document"]["tokens"]
        context = []
        for i in range(len(doc["token"])):
            if not doc["is_html"][i]:
                context.append(doc["token"][i])
        return {
            "context": " ".join(context),
            "answer": {
                "start_token": -100,  # ignore index in cross-entropy
                "end_token": -100,  # ignore index in cross-entropy
                "category": answer["category"],
                "span": answer["category"],  # extra
            },
        }

    # later, help in removing all no answers
    if answer["start_token"] == [-1]:
        return {
            "context": "None",
            "answer": {
                "start_token": -1,
                "end_token": -1,
                "category": "null",
                "span": "None",  # extra
            },
        }

    # handling normal samples
    cols = ["start_token", "end_token"]
    answer.update({k: answer[k][0] if len(answer[k]) > 0 else answer[k] for k in cols})  # e.g. [10] == 10

    doc = example["document"]["tokens"]
    start_token = answer["start_token"]
    end_token = answer["end_token"]

    context = []
    for i in range(len(doc["token"])):
        if not doc["is_html"][i]:
            context.append(doc["token"][i])
        else:
            if answer["start_token"] > i:
                start_token -= 1
            if answer["end_token"] > i:
                end_token -= 1
    new = " ".join(context[start_token:end_token])

    # checking above code
    if assertion:
        is_html = doc["is_html"][answer["start_token"] : answer["end_token"]]
        old = doc["token"][answer["start_token"] : answer["end_token"]]
        old = " ".join([old[i] for i in range(len(old)) if not is_html[i]])
        if new != old:
            print("ID:", example["id"])
            print("New:", new, end="\n")
            print("Old:", old, end="\n\n")

    return {
        "context": " ".join(context),
        "answer": {
            "start_token": start_token,
            "end_token": end_token - 1,  # this makes it inclusive
            "category": answer["category"],  # either long or short
            "span": new,  # extra
        },
    }
def get_strided_contexts_and_ans(example, tokenizer, doc_stride=2048, max_length=4096, assertion=True):
    out = get_context_and_ans(example, assertion=assertion)
    answer = out["answer"]

    # later, removing these samples
    if answer["start_token"] == -1:
        return {
            "example_id": example["id"],
            "input_ids": [[-1]],
            "labels": {
                "start_token": [-1],
                "end_token": [-1],
                "category": ["null"],
            },
        }

    input_ids = tokenizer(example["question"]["text"], out["context"]).input_ids
    q_len = input_ids.index(tokenizer.sep_token_id) + 1

    # return yes/no
    if answer["category"][0] in ["yes", "no"]:  # category is list with one element
        inputs = []
        category = []
        q_indices = input_ids[:q_len]
        doc_start_indices = range(q_len, len(input_ids), max_length - doc_stride)

        for i in doc_start_indices:
            end_index = i + max_length - q_len
            slice = input_ids[i:end_index]
            inputs.append(q_indices + slice)
            category.append(answer["category"][0])
            if slice[-1] == tokenizer.sep_token_id:
                break

        return {
            "example_id": example["id"],
            "input_ids": inputs,
            "labels": {
                "start_token": [-100] * len(category),
                "end_token": [-100] * len(category),
                "category": category,
            },
        }

    splitted_context = out["context"].split()
    complete_end_token = splitted_context[answer["end_token"]]
    answer["start_token"] = len(
        tokenizer(
            " ".join(splitted_context[: answer["start_token"]]),
            add_special_tokens=False,
        ).input_ids
    )
    answer["end_token"] = len(
        tokenizer(" ".join(splitted_context[: answer["end_token"]]), add_special_tokens=False).input_ids
    )

    answer["start_token"] += q_len
    answer["end_token"] += q_len

    # fixing end token
    num_sub_tokens = len(tokenizer(complete_end_token, add_special_tokens=False).input_ids)
    if num_sub_tokens > 1:
        answer["end_token"] += num_sub_tokens - 1

    old = input_ids[answer["start_token"] : answer["end_token"] + 1]  # right & left are inclusive
    start_token = answer["start_token"]
    end_token = answer["end_token"]

    if assertion:
        new = tokenizer.decode(old)
        if answer["span"] != new:
            print("ISSUE IN TOKENIZATION")
            print("OLD:", answer["span"])
            print("NEW:", new, end="\n\n")

    if len(input_ids) <= max_length:
        return {
            "example_id": example["id"],
            "input_ids": [input_ids],
            "labels": {
                "start_token": [answer["start_token"]],
                "end_token": [answer["end_token"]],
                "category": answer["category"],
            },
        }

    q_indices = input_ids[:q_len]
    doc_start_indices = range(q_len, len(input_ids), max_length - doc_stride)

    inputs = []
    answers_start_token = []
    answers_end_token = []
    answers_category = []  # null, yes, no, long, short
    for i in doc_start_indices:
        end_index = i + max_length - q_len
        slice = input_ids[i:end_index]
        inputs.append(q_indices + slice)
        assert len(inputs[-1]) <= max_length, "Issue in truncating length"

        if start_token >= i and end_token <= end_index - 1:
            start_token = start_token - i + q_len
            end_token = end_token - i + q_len
            answers_category.append(answer["category"][0])  # ["short"] -> "short"
        else:
            start_token = -100
            end_token = -100
            answers_category.append("null")
        new = inputs[-1][start_token : end_token + 1]

        answers_start_token.append(start_token)
        answers_end_token.append(end_token)
        if assertion:
            if new != old and new != [tokenizer.cls_token_id]:
                print("ISSUE in strided for ID:", example["id"])
                print("New:", tokenizer.decode(new))
                print("Old:", tokenizer.decode(old), end="\n\n")
        if slice[-1] == tokenizer.sep_token_id:
            break

    return {
        "example_id": example["id"],
        "input_ids": inputs,
        "labels": {
            "start_token": answers_start_token,
            "end_token": answers_end_token,
            "category": answers_category,
        },
    }
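# Stride arithmetic for the windows built above (stated here for clarity): consecutive
# windows start max_length - doc_stride = 4096 - 2048 = 2048 token ids apart, and each
# window holds max_length - q_len document ids after the q_len question ids, so adjacent
# windows share (max_length - q_len) - (max_length - doc_stride) = doc_stride - q_len
# document ids of overlap.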
def prepare_inputs(example, tokenizer, doc_stride=2048, max_length=4096, assertion=False):
    example = get_strided_contexts_and_ans(
        example,
        tokenizer,
        doc_stride=doc_stride,
        max_length=max_length,
        assertion=assertion,
    )
    return example


def save_to_disk(hf_data, file_name):
    with jsonlines.open(file_name, "a") as writer:
        for example in tqdm(hf_data, total=len(hf_data), desc="Saving samples ... "):
            labels = example["labels"]
            for ids, start, end, cat in zip(
                example["input_ids"],
                labels["start_token"],
                labels["end_token"],
                labels["category"],
            ):
                if start == -1 and end == -1:
                    continue  # leave waste samples with no answer
                if cat == "null" and np.random.rand() < 0.6:
                    continue  # removing ~60% of the no-answer samples
                writer.write(
                    {
                        "input_ids": ids,
                        "start_token": start,
                        "end_token": end,
                        "category": CATEGORY_MAPPING[cat],
                    }
                )


if __name__ == "__main__":
    from datasets import load_dataset
    from transformers import BigBirdTokenizer

    data = load_dataset("natural_questions")
    tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")

    data = data["train" if PROCESS_TRAIN == "true" else "validation"]

    fn_kwargs = {
        "tokenizer": tokenizer,
        "doc_stride": DOC_STRIDE,
        "max_length": MAX_LENGTH,
        "assertion": False,
    }
    data = data.map(prepare_inputs, fn_kwargs=fn_kwargs)
    data = data.remove_columns(["annotations", "document", "id", "question"])
    print(data)

    np.random.seed(SEED)
    cache_file_name = "nq-training.jsonl" if PROCESS_TRAIN == "true" else "nq-validation.jsonl"
    save_to_disk(data, file_name=cache_file_name)
| 283 |
import argparse
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size, drop_last=True
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"],
        shuffle=False,
        collate_fn=collate_fn,
        batch_size=EVAL_BATCH_SIZE,
        drop_last=(accelerator.mixed_precision == "fp8"),
    )

    return train_dataloader, eval_dataloader
def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
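# Effective batch size sanity check for the accumulation logic above: with a configured
# batch_size of 64 and MAX_GPU_BATCH_SIZE = 16, gradient_accumulation_steps becomes
# 64 // 16 = 4 and the per-step batch size drops to 16, so the optimizer still sees
# 16 * 4 = 64 examples per update.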
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
| 13 | 0 |
"""Testing suite for the TensorFlow VisionTextDualEncoder model."""
from __future__ import annotations
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import is_tf_available, is_vision_available
from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_tf_bert import TFBertModelTester
from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester
from ..deit.test_modeling_tf_deit import TFDeiTModelTester
from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester
from ..vit.test_modeling_tf_vit import TFViTModelTester
if is_tf_available():
from transformers import (
TFBertModel,
TFCLIPVisionModel,
TFDeiTModel,
TFRobertaModel,
TFVisionTextDualEncoderModel,
TFViTModel,
VisionTextDualEncoderConfig,
)
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor
def to_2tuple(x):
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)
@require_tf
class TFVisionTextDualEncoderMixin:
    def get_vision_text_model(self, config, text_config):
        pass

    def prepare_config_and_inputs(self):
        pass

    def get_pretrained_model_and_inputs(self):
        pass

    def check_model_from_pretrained_configs(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)

        model = TFVisionTextDualEncoderModel(config)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], config.projection_dim))

    def check_vision_text_dual_encoder_model(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim))

    def check_vision_text_dual_encoder_from_pretrained(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim))

    def check_save_load(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        out_1 = output[0].numpy()

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model = TFVisionTextDualEncoderModel.from_pretrained(tmpdirname)

            after_output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
            out_2 = after_output[0].numpy()
            max_diff = np.amax(np.abs(out_2 - out_1))
            self.assertLessEqual(max_diff, 1e-5)

    def check_vision_text_output_attention(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)

        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True
        )

        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)

        # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        image_size = to_2tuple(vision_model.config.image_size)
        patch_size = to_2tuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 1
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))

        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)

        self.assertEqual(
            text_attentions[0].shape[-3:],
            (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]),
        )

    def assert_almost_equals(self, a: np.ndarray, b: np.ndarray, tol: float):
        diff = np.abs(a - b).max()
        self.assertLessEqual(diff, tol, f"Difference between torch and flax is {diff} (>= {tol}).")

    def test_vision_text_dual_encoder_model(self):
        inputs = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_model(**inputs)

    def test_model_from_pretrained_configs(self):
        inputs = self.prepare_config_and_inputs()
        self.check_model_from_pretrained_configs(**inputs)

    def test_vision_text_dual_encoder_from_pretrained(self):
        inputs = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_from_pretrained(**inputs)

    def test_save_load(self):
        inputs = self.prepare_config_and_inputs()
        self.check_save_load(**inputs)

    def test_vision_text_output_attention(self):
        inputs = self.prepare_config_and_inputs()
        self.check_vision_text_output_attention(**inputs)

    @slow
    def test_pretrained_model_sharing(self):
        model_2, inputs = self.get_pretrained_model_and_inputs()

        outputs = model_2(**inputs)
        out_2 = outputs[0].numpy()

        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_2.save_pretrained(tmp_dirname)
            model_1 = TFVisionTextDualEncoderModel.from_pretrained(tmp_dirname)

            after_outputs = model_1(**inputs)
            out_1 = after_outputs[0].numpy()
            max_diff = np.amax(np.abs(out_1 - out_2))
            self.assertLessEqual(max_diff, 1e-5)
@require_tf
class TFViTBertModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-vit", "hf-internal-testing/tiny-random-bert"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFViTModel(vision_config, name="vision_model")
        text_model = TFBertModel(text_config, name="text_model")
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        vit_model_tester = TFViTModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values, _ = vision_config_and_inputs

        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs

        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }
@require_tf
class TFDeiTRobertaModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "Rocketknight1/tiny-random-deit-tf", "hf-internal-testing/tiny-random-roberta"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def check_vision_text_output_attention(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)

        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True
        )

        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)

        # in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        image_size = to_2tuple(vision_model.config.image_size)
        patch_size = to_2tuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 2
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))

        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)

        self.assertEqual(
            text_attentions[0].shape[-3:],
            (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]),
        )

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFDeiTModel(vision_config, name="vision_model")
        text_model = TFRobertaModel(text_config, name="text_model")
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        vit_model_tester = TFDeiTModelTester(self)
        bert_model_tester = TFRobertaModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values, _ = vision_config_and_inputs

        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs

        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }
@require_tf
class TFCLIPVisionBertModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "Rocketknight1/tiny-random-clip-tf", "hf-internal-testing/tiny-random-bert"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFCLIPVisionModel(vision_config, name="vision_model")
        text_model = TFBertModel(text_config, name="text_model")
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        clip_model_tester = TFCLIPVisionModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs

        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs

        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }
@require_vision
@require_tf
class TFVisionTextDualEncoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference(self):
        model = TFVisionTextDualEncoderModel.from_pretrained(
            "clip-italian/clip-italian", logit_scale_init_value=1.0, from_pt=True
        )
        processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian")

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = processor(
            text=["una foto di un gatto", "una foto di un cane"], images=image, padding=True, return_tensors="np"
        )

        outputs = model(**inputs)

        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
        self.assertEqual(
            outputs.logits_per_text.shape,
            (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]),
        )

        expected_logits = np.array([[1.2284727, 0.3104122]])

        self.assertTrue(np.allclose(outputs.logits_per_image.numpy(), expected_logits, atol=1e-3))
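        # Shape logic exercised above: logits_per_image is (num_images, num_texts) and
        # logits_per_text is its transpose, here (1, 2) and (2, 1) for one image and two captions.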
| 166 |
from collections.abc import Callable
class Heap:
    """A generic Heap class, usable as a max-heap by default or as a min-heap via the key function."""

    def __init__(self, key: Callable | None = None):
        # Stores actual heap items.
        self.arr: list = []
        # Stores indexes of each item for supporting updates and deletion.
        self.pos_map: dict = {}
        # Stores current size of heap.
        self.size = 0
        # Stores function used to evaluate the score of an item on which basis ordering
        # will be done.
        self.key = key or (lambda x: x)

    def _parent(self, i: int) -> int | None:
        return int((i - 1) / 2) if i > 0 else None

    def _left(self, i: int) -> int | None:
        left = int(2 * i + 1)
        return left if 0 < left < self.size else None

    def _right(self, i: int) -> int | None:
        right = int(2 * i + 2)
        return right if 0 < right < self.size else None

    def _swap(self, i: int, j: int) -> None:
        # First update the indexes of the items in index map.
        self.pos_map[self.arr[i][0]], self.pos_map[self.arr[j][0]] = (
            self.pos_map[self.arr[j][0]],
            self.pos_map[self.arr[i][0]],
        )
        # Then swap the items in the list.
        self.arr[i], self.arr[j] = self.arr[j], self.arr[i]

    def _cmp(self, i: int, j: int) -> bool:
        return self.arr[i][1] < self.arr[j][1]

    def _get_valid_parent(self, i: int) -> int:
        left = self._left(i)
        right = self._right(i)
        valid_parent = i

        if left is not None and not self._cmp(left, valid_parent):
            valid_parent = left
        if right is not None and not self._cmp(right, valid_parent):
            valid_parent = right

        return valid_parent

    def _heapify_up(self, index: int) -> None:
        parent = self._parent(index)
        while parent is not None and not self._cmp(index, parent):
            self._swap(index, parent)
            index, parent = parent, self._parent(parent)

    def _heapify_down(self, index: int) -> None:
        valid_parent = self._get_valid_parent(index)
        while valid_parent != index:
            self._swap(index, valid_parent)
            index, valid_parent = valid_parent, self._get_valid_parent(valid_parent)

    def update_item(self, item: int, item_value: int) -> None:
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        self.arr[index] = [item, self.key(item_value)]
        # Make sure heap is right in both up and down direction.
        # Ideally only one of them will make any change.
        self._heapify_up(index)
        self._heapify_down(index)

    def delete_item(self, item: int) -> None:
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        del self.pos_map[item]
        self.arr[index] = self.arr[self.size - 1]
        self.pos_map[self.arr[self.size - 1][0]] = index
        self.size -= 1
        # Make sure heap is right in both up and down direction. Ideally only one
        # of them will make any change- so no performance loss in calling both.
        if self.size > index:
            self._heapify_up(index)
            self._heapify_down(index)

    def insert_item(self, item: int, item_value: int) -> None:
        arr_len = len(self.arr)
        if arr_len == self.size:
            self.arr.append([item, self.key(item_value)])
        else:
            self.arr[self.size] = [item, self.key(item_value)]
        self.pos_map[item] = self.size
        self.size += 1
        self._heapify_up(self.size - 1)

    def get_top(self):
        return self.arr[0] if self.size else None

    def extract_top(self):
        top_item_tuple = self.get_top()
        if top_item_tuple:
            self.delete_item(top_item_tuple[0])
        return top_item_tuple
def test_heap() -> None:
    pass
if __name__ == "__main__":
import doctest
doctest.testmod()
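# Illustrative usage of the Heap above (a max-heap on the item values by default;
# pass key=lambda x: -x for min-heap behavior):
#
#   h = Heap()
#   h.insert_item(5, 34)
#   h.insert_item(6, 31)
#   h.insert_item(7, 37)
#   h.get_top()      # -> [7, 37]
#   h.extract_top()  # -> [7, 37]; afterwards h.get_top() -> [5, 34]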
| 13 | 0 |
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class FlaxRoFormerModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RoFormerConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class A (UpperCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
__lowerCamelCase : List[Any] = True
__lowerCamelCase : str = (
(
FlaxRoFormerModel,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
)
if is_flax_available()
else ()
)
def a_ ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
A__ = FlaxRoFormerModelTester(self )
@slow
def a_ ( self : Optional[Any] ) -> int:
"""simple docstring"""
for model_class_name in self.all_model_classes:
A__ = model_class_name.from_pretrained("""junnyu/roformer_chinese_small""" , from_pt=lowerCAmelCase__ )
A__ = model(np.ones((1, 1) ) )
self.assertIsNotNone(lowerCAmelCase__ )
@require_flax
class A (unittest.TestCase ):
'''simple docstring'''
@slow
def a_ ( self : List[str] ) -> Any:
"""simple docstring"""
A__ = FlaxRoFormerForMaskedLM.from_pretrained("""junnyu/roformer_chinese_base""" )
A__ = jnp.array([[0, 1, 2, 3, 4, 5]] )
A__ = model(lowerCAmelCase__ )[0]
A__ = 5_00_00
A__ = (1, 6, vocab_size)
self.assertEqual(output.shape , lowerCAmelCase__ )
A__ = jnp.array(
[[[-0.1_2_0_5, -1.0_2_6_5, 0.2_9_2_2], [-1.5_1_3_4, 0.1_9_7_4, 0.1_5_1_9], [-5.0_1_3_5, -3.9_0_0_3, -0.8_4_0_4]]] )
self.assertTrue(jnp.allclose(output[:, :3, :3] , lowerCAmelCase__ , atol=1e-4 ) )
| 274 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
if is_sentencepiece_available():
from ..ta.tokenization_ta import TaTokenizer
else:
from ...utils.dummy_sentencepiece_objects import TaTokenizer
lowerCAmelCase : Any = TaTokenizer
if is_tokenizers_available():
from ..ta.tokenization_ta_fast import TaTokenizerFast
else:
from ...utils.dummy_tokenizers_objects import TaTokenizerFast
lowerCAmelCase : Dict = TaTokenizerFast
lowerCAmelCase : Optional[int] = {"""configuration_mt5""": ["""MT5Config""", """MT5OnnxConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : int = [
"""MT5EncoderModel""",
"""MT5ForConditionalGeneration""",
"""MT5ForQuestionAnswering""",
"""MT5Model""",
"""MT5PreTrainedModel""",
"""MT5Stack""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Tuple = ["""TFMT5EncoderModel""", """TFMT5ForConditionalGeneration""", """TFMT5Model"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : List[str] = ["""FlaxMT5EncoderModel""", """FlaxMT5ForConditionalGeneration""", """FlaxMT5Model"""]
if TYPE_CHECKING:
from .configuration_mta import MTaConfig, MTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mta import (
MTaEncoderModel,
MTaForConditionalGeneration,
MTaForQuestionAnswering,
MTaModel,
MTaPreTrainedModel,
MTaStack,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel
else:
import sys
lowerCAmelCase : Optional[Any] = _LazyModule(
__name__,
globals()["""__file__"""],
_import_structure,
extra_objects={"""MT5Tokenizer""": MTaTokenizer, """MT5TokenizerFast""": MTaTokenizerFast},
module_spec=__spec__,
)
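# Note on the _LazyModule pattern above: importing this package stays cheap
# because the framework-specific submodules (torch / tf / flax) are only
# imported the first time one of the exported names is actually accessed;
# the extra_objects dict eagerly exposes the tokenizer aliases resolved above.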
| 13 | 0 |
'''simple docstring'''
import itertools
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class UpperCamelCase__ ( datasets.BuilderConfig ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[datasets.Features] = None
class UpperCamelCase__ ( datasets.ArrowBasedBuilder ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = PandasConfig
def A_ ( self ):
'''simple docstring'''
return datasets.DatasetInfo(features=self.config.features )
def A_ ( self , snake_case ):
'''simple docstring'''
if not self.config.data_files:
raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}" )
UpperCAmelCase : List[Any] = dl_manager.download_and_extract(self.config.data_files )
if isinstance(lowerCAmelCase__ , (str, list, tuple) ):
UpperCAmelCase : List[str] = data_files
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
UpperCAmelCase : int = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
UpperCAmelCase : int = [dl_manager.iter_files(lowerCAmelCase__ ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files} )]
UpperCAmelCase : Optional[Any] = []
for split_name, files in data_files.items():
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
UpperCAmelCase : List[Any] = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
UpperCAmelCase : str = [dl_manager.iter_files(lowerCAmelCase__ ) for file in files]
splits.append(datasets.SplitGenerator(name=lowerCAmelCase__ , gen_kwargs={"files": files} ) )
return splits
def A_ ( self , snake_case ):
'''simple docstring'''
if self.config.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
UpperCAmelCase : List[str] = table_cast(lowerCAmelCase__ , self.config.features.arrow_schema )
return pa_table
def A_ ( self , snake_case ):
'''simple docstring'''
for i, file in enumerate(itertools.chain.from_iterable(lowerCAmelCase__ ) ):
with open(lowerCAmelCase__ , "rb" ) as f:
UpperCAmelCase : Tuple = pa.Table.from_pandas(pd.read_pickle(lowerCAmelCase__ ) )
yield i, self._cast_table(lowerCAmelCase__ )
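# Data flow of the builder above: every configured data file is expected to be
# a pickled pandas.DataFrame. The generator loads it with pd.read_pickle,
# converts it to a pyarrow Table, and casts it to the user-supplied feature
# schema when ``features`` is set in the config.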
| 311 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class __lowercase ( UpperCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
_UpperCAmelCase : List[str] = ShapEPipeline
_UpperCAmelCase : Tuple = ['''prompt''']
_UpperCAmelCase : Dict = ['''prompt''']
_UpperCAmelCase : Any = [
'''num_images_per_prompt''',
'''num_inference_steps''',
'''generator''',
'''latents''',
'''guidance_scale''',
'''frame_size''',
'''output_type''',
'''return_dict''',
]
_UpperCAmelCase : Optional[int] = False
@property
def _SCREAMING_SNAKE_CASE ( self : List[str]):
return 32
@property
def _SCREAMING_SNAKE_CASE ( self : List[str]):
return 32
@property
def _SCREAMING_SNAKE_CASE ( self : int):
return self.time_input_dim * 4
@property
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
return 8
@property
def _SCREAMING_SNAKE_CASE ( self : int):
SCREAMING_SNAKE_CASE_: str = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
return tokenizer
@property
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
torch.manual_seed(0)
SCREAMING_SNAKE_CASE_: Optional[int] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(lowerCAmelCase__)
@property
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
torch.manual_seed(0)
SCREAMING_SNAKE_CASE_: Tuple = {
"num_attention_heads": 2,
"attention_head_dim": 16,
"embedding_dim": self.time_input_dim,
"num_embeddings": 32,
"embedding_proj_dim": self.text_embedder_hidden_size,
"time_embed_dim": self.time_embed_dim,
"num_layers": 1,
"clip_embed_dim": self.time_input_dim * 2,
"additional_embeddings": 0,
"time_embed_act_fn": "gelu",
"norm_in_type": "layer",
"encoder_hid_proj_type": None,
"added_emb_type": None,
}
SCREAMING_SNAKE_CASE_: Any = PriorTransformer(**lowerCAmelCase__)
return model
@property
def _SCREAMING_SNAKE_CASE ( self : Dict):
torch.manual_seed(0)
SCREAMING_SNAKE_CASE_: Union[str, Any] = {
"param_shapes": (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
"d_latent": self.time_input_dim,
"d_hidden": self.renderer_dim,
"n_output": 12,
"background": (
0.1,
0.1,
0.1,
),
}
SCREAMING_SNAKE_CASE_: Optional[int] = ShapERenderer(**lowerCAmelCase__)
return model
def _SCREAMING_SNAKE_CASE ( self : List[str]):
SCREAMING_SNAKE_CASE_: Dict = self.dummy_prior
SCREAMING_SNAKE_CASE_: Optional[Any] = self.dummy_text_encoder
SCREAMING_SNAKE_CASE_: Union[str, Any] = self.dummy_tokenizer
SCREAMING_SNAKE_CASE_: List[str] = self.dummy_renderer
SCREAMING_SNAKE_CASE_: Any = HeunDiscreteScheduler(
beta_schedule="exp" , num_train_timesteps=1024 , prediction_type="sample" , use_karras_sigmas=lowerCAmelCase__ , clip_sample=lowerCAmelCase__ , clip_sample_range=1.0 , )
SCREAMING_SNAKE_CASE_: Optional[int] = {
"prior": prior,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"renderer": renderer,
"scheduler": scheduler,
}
return components
def _SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : List[Any]=0):
if str(lowerCAmelCase__).startswith("mps"):
SCREAMING_SNAKE_CASE_: Optional[Any] = torch.manual_seed(lowerCAmelCase__)
else:
SCREAMING_SNAKE_CASE_: Any = torch.Generator(device=lowerCAmelCase__).manual_seed(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Union[str, Any] = {
"prompt": "horse",
"generator": generator,
"num_inference_steps": 1,
"frame_size": 32,
"output_type": "np",
}
return inputs
def _SCREAMING_SNAKE_CASE ( self : Dict):
SCREAMING_SNAKE_CASE_: str = "cpu"
SCREAMING_SNAKE_CASE_: Tuple = self.get_dummy_components()
SCREAMING_SNAKE_CASE_: Dict = self.pipeline_class(**lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Any = pipe.to(lowerCAmelCase__)
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[int] = pipe(**self.get_dummy_inputs(lowerCAmelCase__))
SCREAMING_SNAKE_CASE_: Optional[Any] = output.images[0]
SCREAMING_SNAKE_CASE_: Any = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
SCREAMING_SNAKE_CASE_: Union[str, Any] = np.array(
[
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
        # NOTE: larger batch sizes cause this test to time out, so only test smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2])
def _SCREAMING_SNAKE_CASE ( self : Any):
SCREAMING_SNAKE_CASE_: Dict = torch_device == "cpu"
SCREAMING_SNAKE_CASE_: List[Any] = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=lowerCAmelCase__ , relax_max_difference=lowerCAmelCase__ , )
def _SCREAMING_SNAKE_CASE ( self : int):
SCREAMING_SNAKE_CASE_: Dict = self.get_dummy_components()
SCREAMING_SNAKE_CASE_: str = self.pipeline_class(**lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Tuple = pipe.to(lowerCAmelCase__)
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[Any] = 1
SCREAMING_SNAKE_CASE_: Any = 2
SCREAMING_SNAKE_CASE_: Dict = self.get_dummy_inputs(lowerCAmelCase__)
for key in inputs.keys():
if key in self.batch_params:
SCREAMING_SNAKE_CASE_: List[Any] = batch_size * [inputs[key]]
SCREAMING_SNAKE_CASE_: Tuple = pipe(**lowerCAmelCase__ , num_images_per_prompt=lowerCAmelCase__)[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class __lowercase ( unittest.TestCase ):
"""simple docstring"""
def _SCREAMING_SNAKE_CASE ( self : Any):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _SCREAMING_SNAKE_CASE ( self : str):
SCREAMING_SNAKE_CASE_: List[str] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/shap_e/test_shap_e_np_out.npy")
SCREAMING_SNAKE_CASE_: List[str] = ShapEPipeline.from_pretrained("openai/shap-e")
SCREAMING_SNAKE_CASE_: Optional[int] = pipe.to(lowerCAmelCase__)
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: int = torch.Generator(device=lowerCAmelCase__).manual_seed(0)
SCREAMING_SNAKE_CASE_: int = pipe(
"a shark" , generator=lowerCAmelCase__ , guidance_scale=15.0 , num_inference_steps=64 , frame_size=64 , output_type="np" , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(lowerCAmelCase__ , lowerCAmelCase__)
| 13 | 0 |
import logging
from transformers import PretrainedConfig
_lowerCamelCase : Any = logging.getLogger(__name__)
_lowerCamelCase : int = {
"""bertabs-finetuned-cnndm""": """https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json""",
}
class UpperCamelCase_ ( UpperCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase__ = '''bertabs'''
def __init__( self : Union[str, Any] , UpperCAmelCase__ : Optional[int]=30_522 , UpperCAmelCase__ : Dict=512 , UpperCAmelCase__ : Optional[int]=6 , UpperCAmelCase__ : Any=512 , UpperCAmelCase__ : Tuple=8 , UpperCAmelCase__ : int=512 , UpperCAmelCase__ : str=0.2 , UpperCAmelCase__ : int=6 , UpperCAmelCase__ : List[str]=768 , UpperCAmelCase__ : str=8 , UpperCAmelCase__ : str=2_048 , UpperCAmelCase__ : Optional[int]=0.2 , **UpperCAmelCase__ : str , ) ->Tuple:
'''simple docstring'''
super().__init__(**UpperCAmelCase__)
A__ = vocab_size
A__ = max_pos
A__ = enc_layers
A__ = enc_hidden_size
A__ = enc_heads
A__ = enc_ff_size
A__ = enc_dropout
A__ = dec_layers
A__ = dec_hidden_size
A__ = dec_heads
A__ = dec_ff_size
A__ = dec_dropout
| 14 |
def SCREAMING_SNAKE_CASE ( lowercase_ ) -> "list[int]":
"""simple docstring"""
if upper_limit < 0:
raise ValueError('''Limit for the Catalan sequence must be ≥ 0''' )
A__ = [0] * (upper_limit + 1)
# Base case: C(0) = C(1) = 1
A__ = 1
if upper_limit > 0:
A__ = 1
# Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
for i in range(2 , upper_limit + 1 ):
for j in range(lowercase_ ):
catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]
return catalan_list
if __name__ == "__main__":
print("""\n********* Catalan Numbers Using Dynamic Programming ************\n""")
print("""\n*** Enter -1 at any time to quit ***""")
print("""\nEnter the upper limit (≥ 0) for the Catalan number sequence: """, end="""""")
try:
while True:
_lowerCamelCase : List[Any] = int(input().strip())
if N < 0:
print("""\n********* Goodbye!! ************""")
break
else:
print(F'''The Catalan numbers from 0 through {N} are:''')
print(catalan_numbers(N))
print("""Try another upper limit for the sequence: """, end="""""")
except (NameError, ValueError):
print("""\n********* Invalid input, goodbye! ************\n""")
import doctest
doctest.testmod()
| 14 | 1 |
def SCREAMING_SNAKE_CASE ( lowercase_ ) -> int:
"""simple docstring"""
assert isinstance(lowercase_ , lowercase_ ), f"""The input value of [n={number}] is not an integer"""
if number == 1:
return 2
elif number < 1:
A__ = f"""The input value of [n={number}] has to be > 0"""
raise ValueError(lowercase_ )
else:
A__ = sylvester(number - 1 )
A__ = num - 1
A__ = num
return lower * upper + 1
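# First terms of Sylvester's sequence: 2, 3, 7, 43, 1807, 3263443, ...
# Equivalently, each term is the product of all previous terms, plus one.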
if __name__ == "__main__":
print(F'''The 8th number in Sylvester\'s sequence: {sylvester(8)}''')
| 14 |
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def SCREAMING_SNAKE_CASE ( lowercase_ ) -> Dict:
"""simple docstring"""
A__ = args.pruning_method
A__ = args.threshold
A__ = args.model_name_or_path.rstrip('''/''' )
A__ = args.target_model_path
print(f"""Load fine-pruned model from {model_name_or_path}""" )
A__ = torch.load(os.path.join(lowercase_ , '''pytorch_model.bin''' ) )
A__ = {}
for name, tensor in model.items():
if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
A__ = tensor
print(f"""Copied layer {name}""" )
elif "classifier" in name or "qa_output" in name:
A__ = tensor
print(f"""Copied layer {name}""" )
elif "bias" in name:
A__ = tensor
print(f"""Copied layer {name}""" )
else:
if pruning_method == "magnitude":
A__ = MagnitudeBinarizer.apply(inputs=lowercase_ , threshold=lowercase_ )
A__ = tensor * mask
print(f"""Pruned layer {name}""" )
elif pruning_method == "topK":
if "mask_scores" in name:
continue
A__ = name[:-6]
A__ = model[f"""{prefix_}mask_scores"""]
A__ = TopKBinarizer.apply(lowercase_ , lowercase_ )
A__ = tensor * mask
print(f"""Pruned layer {name}""" )
elif pruning_method == "sigmoied_threshold":
if "mask_scores" in name:
continue
A__ = name[:-6]
A__ = model[f"""{prefix_}mask_scores"""]
A__ = ThresholdBinarizer.apply(lowercase_ , lowercase_ , lowercase_ )
A__ = tensor * mask
print(f"""Pruned layer {name}""" )
elif pruning_method == "l0":
if "mask_scores" in name:
continue
A__ = name[:-6]
A__ = model[f"""{prefix_}mask_scores"""]
A__ , A__ = -0.1, 1.1
A__ = torch.sigmoid(lowercase_ )
A__ = s * (r - l) + l
A__ = s_bar.clamp(min=0.0 , max=1.0 )
A__ = tensor * mask
print(f"""Pruned layer {name}""" )
else:
raise ValueError('''Unknown pruning method''' )
if target_model_path is None:
A__ = os.path.join(
os.path.dirname(lowercase_ ) , f"""bertarized_{os.path.basename(lowercase_ )}""" )
if not os.path.isdir(lowercase_ ):
shutil.copytree(lowercase_ , lowercase_ )
print(f"""\nCreated folder {target_model_path}""" )
torch.save(lowercase_ , os.path.join(lowercase_ , '''pytorch_model.bin''' ) )
print('''\nPruned model saved! See you later!''' )
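# Example invocation (hypothetical script name and paths, for illustration):
#   python bertarize.py --pruning_method topK --threshold 0.10 \
#       --model_name_or_path ./serialization_dir/fine_pruned_squad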
if __name__ == "__main__":
_lowerCamelCase : Optional[Any] = argparse.ArgumentParser()
parser.add_argument(
"""--pruning_method""",
choices=["""l0""", """magnitude""", """topK""", """sigmoied_threshold"""],
type=str,
required=True,
help=(
"""Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,"""
""" sigmoied_threshold = Soft movement pruning)"""
),
)
parser.add_argument(
"""--threshold""",
type=float,
required=False,
help=(
"""For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model."""
"""For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared."""
"""Not needed for `l0`"""
),
)
parser.add_argument(
"""--model_name_or_path""",
type=str,
required=True,
help="""Folder containing the model that was previously fine-pruned""",
)
parser.add_argument(
"""--target_model_path""",
default=None,
type=str,
required=False,
help="""Folder containing the model that was previously fine-pruned""",
)
_lowerCamelCase : int = parser.parse_args()
main(args)
| 14 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tensorflow_text_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_lowerCamelCase : Any = {
"""configuration_bert""": ["""BERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BertConfig""", """BertOnnxConfig"""],
"""tokenization_bert""": ["""BasicTokenizer""", """BertTokenizer""", """WordpieceTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : Optional[int] = ["""BertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : Optional[int] = [
"""BERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BertForMaskedLM""",
"""BertForMultipleChoice""",
"""BertForNextSentencePrediction""",
"""BertForPreTraining""",
"""BertForQuestionAnswering""",
"""BertForSequenceClassification""",
"""BertForTokenClassification""",
"""BertLayer""",
"""BertLMHeadModel""",
"""BertModel""",
"""BertPreTrainedModel""",
"""load_tf_weights_in_bert""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : Union[str, Any] = [
"""TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFBertEmbeddings""",
"""TFBertForMaskedLM""",
"""TFBertForMultipleChoice""",
"""TFBertForNextSentencePrediction""",
"""TFBertForPreTraining""",
"""TFBertForQuestionAnswering""",
"""TFBertForSequenceClassification""",
"""TFBertForTokenClassification""",
"""TFBertLMHeadModel""",
"""TFBertMainLayer""",
"""TFBertModel""",
"""TFBertPreTrainedModel""",
]
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : str = ["""TFBertTokenizer"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : Optional[int] = [
"""FlaxBertForCausalLM""",
"""FlaxBertForMaskedLM""",
"""FlaxBertForMultipleChoice""",
"""FlaxBertForNextSentencePrediction""",
"""FlaxBertForPreTraining""",
"""FlaxBertForQuestionAnswering""",
"""FlaxBertForSequenceClassification""",
"""FlaxBertForTokenClassification""",
"""FlaxBertModel""",
"""FlaxBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig
from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_fast import BertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bert import (
BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
BertForMaskedLM,
BertForMultipleChoice,
BertForNextSentencePrediction,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertForTokenClassification,
BertLayer,
BertLMHeadModel,
BertModel,
BertPreTrainedModel,
load_tf_weights_in_bert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_bert import (
TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBertEmbeddings,
TFBertForMaskedLM,
TFBertForMultipleChoice,
TFBertForNextSentencePrediction,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertForTokenClassification,
TFBertLMHeadModel,
TFBertMainLayer,
TFBertModel,
TFBertPreTrainedModel,
)
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_tf import TFBertTokenizer
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_bert import (
FlaxBertForCausalLM,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
FlaxBertPreTrainedModel,
)
else:
import sys
_lowerCamelCase : List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 14 |
_lowerCamelCase : Optional[int] = 65521
def SCREAMING_SNAKE_CASE ( lowercase_ ) -> int:
"""simple docstring"""
A__ = 1
A__ = 0
for plain_chr in plain_text:
A__ = (a + ord(lowercase_ )) % MOD_ADLER
A__ = (b + a) % MOD_ADLER
return (b << 16) | a
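# Known-answer note (for illustration): with the standard initial values
# a = 1 and b = 0 above, the Adler-32 checksum of "Wikipedia" is
# 0x11E60398 == 300286872.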
| 14 | 1 |
from dataclasses import dataclass
from typing import Optional
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .modeling_utils import ModelMixin
@dataclass
class UpperCamelCase_ ( UpperCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase__ = 42
class UpperCamelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ ):
'''simple docstring'''
@register_to_config
def __init__( self : Optional[int] , UpperCAmelCase__ : int = 16 , UpperCAmelCase__ : int = 88 , UpperCAmelCase__ : Optional[int] = None , UpperCAmelCase__ : Optional[int] = None , UpperCAmelCase__ : int = 1 , UpperCAmelCase__ : float = 0.0 , UpperCAmelCase__ : int = 32 , UpperCAmelCase__ : Optional[int] = None , UpperCAmelCase__ : bool = False , UpperCAmelCase__ : Optional[int] = None , UpperCAmelCase__ : str = "geglu" , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : bool = True , ) ->Union[str, Any]:
'''simple docstring'''
super().__init__()
A__ = num_attention_heads
A__ = attention_head_dim
A__ = num_attention_heads * attention_head_dim
A__ = in_channels
A__ = torch.nn.GroupNorm(num_groups=UpperCAmelCase__ , num_channels=UpperCAmelCase__ , eps=1e-6 , affine=UpperCAmelCase__)
A__ = nn.Linear(UpperCAmelCase__ , UpperCAmelCase__)
# 3. Define transformers blocks
A__ = nn.ModuleList(
[
BasicTransformerBlock(
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , dropout=UpperCAmelCase__ , cross_attention_dim=UpperCAmelCase__ , activation_fn=UpperCAmelCase__ , attention_bias=UpperCAmelCase__ , double_self_attention=UpperCAmelCase__ , norm_elementwise_affine=UpperCAmelCase__ , )
for d in range(UpperCAmelCase__)
])
A__ = nn.Linear(UpperCAmelCase__ , UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : Dict , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Dict=None , UpperCAmelCase__ : Optional[Any]=None , UpperCAmelCase__ : Tuple=None , UpperCAmelCase__ : Tuple=1 , UpperCAmelCase__ : int=None , UpperCAmelCase__ : bool = True , ) ->List[str]:
'''simple docstring'''
A__ , A__ , A__ , A__ = hidden_states.shape
A__ = batch_frames // num_frames
A__ = hidden_states
A__ = hidden_states[None, :].reshape(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__)
A__ = hidden_states.permute(0 , 2 , 1 , 3 , 4)
A__ = self.norm(UpperCAmelCase__)
A__ = hidden_states.permute(0 , 3 , 4 , 2 , 1).reshape(batch_size * height * width , UpperCAmelCase__ , UpperCAmelCase__)
A__ = self.proj_in(UpperCAmelCase__)
# 2. Blocks
for block in self.transformer_blocks:
A__ = block(
UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , timestep=UpperCAmelCase__ , cross_attention_kwargs=UpperCAmelCase__ , class_labels=UpperCAmelCase__ , )
# 3. Output
A__ = self.proj_out(UpperCAmelCase__)
A__ = (
hidden_states[None, None, :]
.reshape(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__)
.permute(0 , 3 , 4 , 1 , 2)
.contiguous()
)
A__ = hidden_states.reshape(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__)
A__ = hidden_states + residual
if not return_dict:
return (output,)
return TransformerTemporalModelOutput(sample=UpperCAmelCase__)
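# Shape bookkeeping for the forward pass above: the (batch * frames, C, H, W)
# input is regrouped into batch * H * W sequences of length ``num_frames``, so
# each transformer block attends across time at a fixed spatial location; the
# residual connection at the end restores the original layout.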
| 14 |
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
_lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
_lowerCamelCase : Tuple = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
_lowerCamelCase : Union[str, Any] = {
"""vocab_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
_lowerCamelCase : str = {
"""vocab_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
_lowerCamelCase : str = {
"""vocab_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"""
),
},
}
_lowerCamelCase : Any = {
"""facebook/dpr-ctx_encoder-single-nq-base""": 512,
"""facebook/dpr-ctx_encoder-multiset-base""": 512,
}
_lowerCamelCase : List[str] = {
"""facebook/dpr-question_encoder-single-nq-base""": 512,
"""facebook/dpr-question_encoder-multiset-base""": 512,
}
_lowerCamelCase : Tuple = {
"""facebook/dpr-reader-single-nq-base""": 512,
"""facebook/dpr-reader-multiset-base""": 512,
}
_lowerCamelCase : Optional[Any] = {
"""facebook/dpr-ctx_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-ctx_encoder-multiset-base""": {"""do_lower_case""": True},
}
_lowerCamelCase : Optional[int] = {
"""facebook/dpr-question_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-question_encoder-multiset-base""": {"""do_lower_case""": True},
}
_lowerCamelCase : Optional[Any] = {
"""facebook/dpr-reader-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-reader-multiset-base""": {"""do_lower_case""": True},
}
class UpperCamelCase_ ( UpperCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase__ = VOCAB_FILES_NAMES
UpperCAmelCase__ = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase__ = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase__ = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
UpperCAmelCase__ = DPRContextEncoderTokenizer
class UpperCamelCase_ ( UpperCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase__ = VOCAB_FILES_NAMES
UpperCAmelCase__ = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase__ = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase__ = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
UpperCAmelCase__ = DPRQuestionEncoderTokenizer
_lowerCamelCase : int = collections.namedtuple(
"""DPRSpanPrediction""", ["""span_score""", """relevance_score""", """doc_id""", """start_index""", """end_index""", """text"""]
)
_lowerCamelCase : Any = collections.namedtuple("""DPRReaderOutput""", ["""start_logits""", """end_logits""", """relevance_logits"""])
_lowerCamelCase : Dict = r"""
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
- `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
if provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
- `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
of pairs) is provided.
- `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the first
sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the
second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `'tf'`: Return TensorFlow `tf.constant` objects.
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
Whether or not to return the attention mask. If not set, will return the attention mask according to the
specific tokenizer's default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Return:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
"""
@add_start_docstrings(UpperCAmelCase__ )
class UpperCamelCase_ :
'''simple docstring'''
def __call__( self : Optional[int] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Optional[str] = None , UpperCAmelCase__ : Optional[str] = None , UpperCAmelCase__ : Union[bool, str] = False , UpperCAmelCase__ : Union[bool, str] = False , UpperCAmelCase__ : Optional[int] = None , UpperCAmelCase__ : Optional[Union[str, TensorType]] = None , UpperCAmelCase__ : Optional[bool] = None , **UpperCAmelCase__ : Optional[int] , ) ->BatchEncoding:
'''simple docstring'''
if titles is None and texts is None:
return super().__call__(
UpperCAmelCase__ , padding=UpperCAmelCase__ , truncation=UpperCAmelCase__ , max_length=UpperCAmelCase__ , return_tensors=UpperCAmelCase__ , return_attention_mask=UpperCAmelCase__ , **UpperCAmelCase__ , )
elif titles is None or texts is None:
A__ = titles if texts is None else texts
return super().__call__(
UpperCAmelCase__ , UpperCAmelCase__ , padding=UpperCAmelCase__ , truncation=UpperCAmelCase__ , max_length=UpperCAmelCase__ , return_tensors=UpperCAmelCase__ , return_attention_mask=UpperCAmelCase__ , **UpperCAmelCase__ , )
A__ = titles if not isinstance(UpperCAmelCase__ , UpperCAmelCase__) else [titles]
A__ = texts if not isinstance(UpperCAmelCase__ , UpperCAmelCase__) else [texts]
A__ = len(UpperCAmelCase__)
A__ = questions if not isinstance(UpperCAmelCase__ , UpperCAmelCase__) else [questions] * n_passages
assert len(UpperCAmelCase__) == len(
UpperCAmelCase__), f"""There should be as many titles than texts but got {len(UpperCAmelCase__)} titles and {len(UpperCAmelCase__)} texts."""
A__ = super().__call__(UpperCAmelCase__ , UpperCAmelCase__ , padding=UpperCAmelCase__ , truncation=UpperCAmelCase__)['''input_ids''']
A__ = super().__call__(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ , padding=UpperCAmelCase__ , truncation=UpperCAmelCase__)['''input_ids''']
A__ = {
'''input_ids''': [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(UpperCAmelCase__ , UpperCAmelCase__)
]
}
if return_attention_mask is not False:
A__ = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
A__ = attention_mask
return self.pad(UpperCAmelCase__ , padding=UpperCAmelCase__ , max_length=UpperCAmelCase__ , return_tensors=UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : List[str] , UpperCAmelCase__ : BatchEncoding , UpperCAmelCase__ : DPRReaderOutput , UpperCAmelCase__ : int = 16 , UpperCAmelCase__ : int = 64 , UpperCAmelCase__ : int = 4 , ) ->List[DPRSpanPrediction]:
'''simple docstring'''
A__ = reader_input['''input_ids''']
A__ , A__ , A__ = reader_output[:3]
A__ = len(UpperCAmelCase__)
A__ = sorted(range(UpperCAmelCase__) , reverse=UpperCAmelCase__ , key=relevance_logits.__getitem__)
A__ = []
for doc_id in sorted_docs:
A__ = list(input_ids[doc_id])
# assuming question & title information is at the beginning of the sequence
A__ = sequence_ids.index(self.sep_token_id , 2) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
A__ = sequence_ids.index(self.pad_token_id)
else:
A__ = len(UpperCAmelCase__)
A__ = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=UpperCAmelCase__ , top_spans=UpperCAmelCase__ , )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=UpperCAmelCase__ , start_index=UpperCAmelCase__ , end_index=UpperCAmelCase__ , text=self.decode(sequence_ids[start_index : end_index + 1]) , ))
if len(UpperCAmelCase__) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def SCREAMING_SNAKE_CASE ( self : Any , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : int , UpperCAmelCase__ : int , ) ->List[DPRSpanPrediction]:
'''simple docstring'''
A__ = []
for start_index, start_score in enumerate(UpperCAmelCase__):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
scores.append(((start_index, start_index + answer_length), start_score + end_score))
A__ = sorted(UpperCAmelCase__ , key=lambda UpperCAmelCase__: x[1] , reverse=UpperCAmelCase__)
A__ = []
for (start_index, end_index), score in scores:
assert start_index <= end_index, f"""Wrong span indices: [{start_index}:{end_index}]"""
A__ = end_index - start_index + 1
assert length <= max_answer_length, f"""Span is too long: {length} > {max_answer_length}"""
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals):
continue
chosen_span_intervals.append((start_index, end_index))
if len(UpperCAmelCase__) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(UpperCAmelCase__ )
class UpperCamelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase__ = VOCAB_FILES_NAMES
UpperCAmelCase__ = READER_PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase__ = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase__ = READER_PRETRAINED_INIT_CONFIGURATION
UpperCAmelCase__ = ['''input_ids''', '''attention_mask''']
UpperCAmelCase__ = DPRReaderTokenizer
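# A minimal usage sketch (assuming the class above corresponds to transformers'
# DPRReaderTokenizerFast; the call signature follows the docstring earlier in
# this file):
#
#   tok = DPRReaderTokenizerFast.from_pretrained("facebook/dpr-reader-single-nq-base")
#   enc = tok(questions="Who wrote Hamlet?",
#             titles=["Hamlet", "William Shakespeare"],
#             texts=["Hamlet is a tragedy ...", "Shakespeare was a playwright ..."],
#             padding=True, return_tensors="pt")
#   # enc["input_ids"] has shape (n_passages, sequence_length)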
| 14 | 1 |
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
_lowerCamelCase : int = logging.get_logger(__name__)
class UpperCamelCase_ ( UpperCAmelCase__ ):
'''simple docstring'''
def __init__( self : Dict , *UpperCAmelCase__ : str , **UpperCAmelCase__ : int) ->None:
'''simple docstring'''
warnings.warn(
'''The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use BeitImageProcessor instead.''' , UpperCAmelCase__ , )
super().__init__(*UpperCAmelCase__ , **UpperCAmelCase__)
| 14 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase : Any = logging.get_logger(__name__)
class UpperCamelCase_ ( UpperCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase__ = '''encoder-decoder'''
UpperCAmelCase__ = True
def __init__( self : List[str] , **UpperCAmelCase__ : Union[str, Any]) ->List[Any]:
'''simple docstring'''
super().__init__(**UpperCAmelCase__)
assert (
"encoder" in kwargs and "decoder" in kwargs
), "Config has to be initialized with encoder and decoder config"
A__ = kwargs.pop('''encoder''')
A__ = encoder_config.pop('''model_type''')
A__ = kwargs.pop('''decoder''')
A__ = decoder_config.pop('''model_type''')
from ..auto.configuration_auto import AutoConfig
A__ = AutoConfig.for_model(UpperCAmelCase__ , **UpperCAmelCase__)
A__ = AutoConfig.for_model(UpperCAmelCase__ , **UpperCAmelCase__)
A__ = True
@classmethod
def SCREAMING_SNAKE_CASE ( cls : Union[str, Any] , UpperCAmelCase__ : PretrainedConfig , UpperCAmelCase__ : PretrainedConfig , **UpperCAmelCase__ : Union[str, Any]) ->PretrainedConfig:
'''simple docstring'''
logger.info('''Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config''')
A__ = True
A__ = True
return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : str) ->Optional[Any]:
'''simple docstring'''
A__ = copy.deepcopy(self.__dict__)
A__ = self.encoder.to_dict()
A__ = self.decoder.to_dict()
A__ = self.__class__.model_type
return output
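# A minimal usage sketch (assuming this class maps to transformers'
# EncoderDecoderConfig):
#
#   from transformers import BertConfig, EncoderDecoderConfig
#   config = EncoderDecoderConfig.from_encoder_decoder_configs(BertConfig(), BertConfig())
#   assert config.decoder.is_decoder  # set by the classmethod above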
| 14 | 1 |
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCamelCase : int = logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE ( lowercase_ ) -> Any:
"""simple docstring"""
A__ = OrderedDict()
for key, value in state_dict.items():
if key.startswith('''module.encoder''' ):
A__ = key.replace('''module.encoder''' , '''glpn.encoder''' )
if key.startswith('''module.decoder''' ):
A__ = key.replace('''module.decoder''' , '''decoder.stages''' )
if "patch_embed" in key:
# replace for example patch_embed1 by patch_embeddings.0
A__ = key[key.find('''patch_embed''' ) + len('''patch_embed''' )]
A__ = key.replace(f"""patch_embed{idx}""" , f"""patch_embeddings.{int(lowercase_ )-1}""" )
if "norm" in key:
A__ = key.replace('''norm''' , '''layer_norm''' )
if "glpn.encoder.layer_norm" in key:
# replace for example layer_norm1 by layer_norm.0
A__ = key[key.find('''glpn.encoder.layer_norm''' ) + len('''glpn.encoder.layer_norm''' )]
A__ = key.replace(f"""layer_norm{idx}""" , f"""layer_norm.{int(lowercase_ )-1}""" )
if "layer_norm1" in key:
A__ = key.replace('''layer_norm1''' , '''layer_norm_1''' )
if "layer_norm2" in key:
A__ = key.replace('''layer_norm2''' , '''layer_norm_2''' )
if "block" in key:
# replace for example block1 by block.0
A__ = key[key.find('''block''' ) + len('''block''' )]
A__ = key.replace(f"""block{idx}""" , f"""block.{int(lowercase_ )-1}""" )
if "attn.q" in key:
A__ = key.replace('''attn.q''' , '''attention.self.query''' )
if "attn.proj" in key:
A__ = key.replace('''attn.proj''' , '''attention.output.dense''' )
if "attn" in key:
A__ = key.replace('''attn''' , '''attention.self''' )
if "fc1" in key:
A__ = key.replace('''fc1''' , '''dense1''' )
if "fc2" in key:
A__ = key.replace('''fc2''' , '''dense2''' )
if "linear_pred" in key:
A__ = key.replace('''linear_pred''' , '''classifier''' )
if "linear_fuse" in key:
A__ = key.replace('''linear_fuse.conv''' , '''linear_fuse''' )
A__ = key.replace('''linear_fuse.bn''' , '''batch_norm''' )
if "linear_c" in key:
# replace for example linear_c4 by linear_c.3
A__ = key[key.find('''linear_c''' ) + len('''linear_c''' )]
A__ = key.replace(f"""linear_c{idx}""" , f"""linear_c.{int(lowercase_ )-1}""" )
if "bot_conv" in key:
A__ = key.replace('''bot_conv''' , '''0.convolution''' )
if "skip_conv1" in key:
A__ = key.replace('''skip_conv1''' , '''1.convolution''' )
if "skip_conv2" in key:
A__ = key.replace('''skip_conv2''' , '''2.convolution''' )
if "fusion1" in key:
A__ = key.replace('''fusion1''' , '''1.fusion''' )
if "fusion2" in key:
A__ = key.replace('''fusion2''' , '''2.fusion''' )
if "fusion3" in key:
A__ = key.replace('''fusion3''' , '''3.fusion''' )
if "fusion" in key and "conv" in key:
A__ = key.replace('''conv''' , '''convolutional_layer''' )
if key.startswith('''module.last_layer_depth''' ):
A__ = key.replace('''module.last_layer_depth''' , '''head.head''' )
A__ = value
return new_state_dict
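# For example, "module.encoder.patch_embed1.proj.weight" is mapped to
# "glpn.encoder.patch_embeddings.0.proj.weight" by the rules above.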
def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> str:
"""simple docstring"""
for i in range(config.num_encoder_blocks ):
for j in range(config.depths[i] ):
# read in weights + bias of keys and values (which is a single matrix in the original implementation)
A__ = state_dict.pop(f"""glpn.encoder.block.{i}.{j}.attention.self.kv.weight""" )
A__ = state_dict.pop(f"""glpn.encoder.block.{i}.{j}.attention.self.kv.bias""" )
# next, add keys and values (in that order) to the state dict
A__ = kv_weight[
: config.hidden_sizes[i], :
]
A__ = kv_bias[: config.hidden_sizes[i]]
A__ = kv_weight[
config.hidden_sizes[i] :, :
]
A__ = kv_bias[config.hidden_sizes[i] :]
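# The original checkpoint stores keys and values as one fused ``kv`` matrix per
# attention block; the loop above splits it row-wise, with the first
# hidden_sizes[i] rows becoming the key projection and the rest the value
# projection.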
def SCREAMING_SNAKE_CASE ( ) -> Optional[Any]:
"""simple docstring"""
A__ = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
A__ = Image.open(requests.get(lowercase_ , stream=lowercase_ ).raw )
return image
@torch.no_grad()
def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_=False , lowercase_=None ) -> Any:
"""simple docstring"""
A__ = GLPNConfig(hidden_sizes=[64, 128, 320, 512] , decoder_hidden_size=64 , depths=[3, 8, 27, 3] )
# load image processor (only resize + rescale)
A__ = GLPNImageProcessor()
# prepare image
A__ = prepare_img()
A__ = image_processor(images=lowercase_ , return_tensors='''pt''' ).pixel_values
logger.info('''Converting model...''' )
# load original state dict
A__ = torch.load(lowercase_ , map_location=torch.device('''cpu''' ) )
# rename keys
A__ = rename_keys(lowercase_ )
# key and value matrices need special treatment
read_in_k_v(lowercase_ , lowercase_ )
# create HuggingFace model and load state dict
A__ = GLPNForDepthEstimation(lowercase_ )
model.load_state_dict(lowercase_ )
model.eval()
# forward pass
A__ = model(lowercase_ )
A__ = outputs.predicted_depth
# verify output
if model_name is not None:
if "nyu" in model_name:
A__ = torch.tensor(
[[4.41_47, 4.08_73, 4.06_73], [3.78_90, 3.28_81, 3.15_25], [3.76_74, 3.54_23, 3.49_13]] )
elif "kitti" in model_name:
A__ = torch.tensor(
[[3.42_91, 2.78_65, 2.51_51], [3.28_41, 2.70_21, 2.35_02], [3.11_47, 2.46_25, 2.24_81]] )
else:
raise ValueError(f"""Unknown model name: {model_name}""" )
A__ = torch.Size([1, 480, 640] )
assert predicted_depth.shape == expected_shape
assert torch.allclose(predicted_depth[0, :3, :3] , lowercase_ , atol=1E-4 )
print('''Looks ok!''' )
# finally, push to hub if required
if push_to_hub:
logger.info('''Pushing model and image processor to the hub...''' )
model.push_to_hub(
repo_path_or_name=Path(lowercase_ , lowercase_ ) , organization='''nielsr''' , commit_message='''Add model''' , use_temp_dir=lowercase_ , )
image_processor.push_to_hub(
repo_path_or_name=Path(lowercase_ , lowercase_ ) , organization='''nielsr''' , commit_message='''Add image processor''' , use_temp_dir=lowercase_ , )
if __name__ == "__main__":
_lowerCamelCase : int = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_path""",
default=None,
type=str,
help="""Path to the original PyTorch checkpoint (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether to upload the model to the HuggingFace hub."""
)
parser.add_argument(
"""--model_name""",
default="""glpn-kitti""",
type=str,
help="""Name of the model in case you're pushing to the hub.""",
)
_lowerCamelCase : Any = parser.parse_args()
convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| 14 |
def SCREAMING_SNAKE_CASE ( lowercase_ ) -> Any:
"""simple docstring"""
A__ = [0] * len(lowercase_ )
A__ = []
A__ = [1] * len(lowercase_ )
for values in graph.values():
for i in values:
indegree[i] += 1
for i in range(len(lowercase_ ) ):
if indegree[i] == 0:
queue.append(lowercase_ )
while queue:
A__ = queue.pop(0 )
for x in graph[vertex]:
indegree[x] -= 1
if long_dist[vertex] + 1 > long_dist[x]:
A__ = long_dist[vertex] + 1
if indegree[x] == 0:
queue.append(lowercase_ )
print(max(lowercase_ ) )
# Adjacency list of the graph
_lowerCamelCase : Optional[int] = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
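# Expected output for the graph above: 5 (e.g. the path 0 -> 2 -> 5 -> 6 -> 7
# visits five vertices).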
| 14 | 1 |
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
_lowerCamelCase : List[str] = logging.get_logger(__name__)
_lowerCamelCase : Optional[int] = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
_lowerCamelCase : Optional[Any] = {
"""vocab_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
_lowerCamelCase : Dict = {
"""vocab_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
_lowerCamelCase : str = {
"""vocab_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"""
),
},
}
_lowerCamelCase : Any = {
"""facebook/dpr-ctx_encoder-single-nq-base""": 512,
"""facebook/dpr-ctx_encoder-multiset-base""": 512,
}
_lowerCamelCase : Dict = {
"""facebook/dpr-question_encoder-single-nq-base""": 512,
"""facebook/dpr-question_encoder-multiset-base""": 512,
}
_lowerCamelCase : Optional[int] = {
"""facebook/dpr-reader-single-nq-base""": 512,
"""facebook/dpr-reader-multiset-base""": 512,
}
_lowerCamelCase : Optional[Any] = {
"""facebook/dpr-ctx_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-ctx_encoder-multiset-base""": {"""do_lower_case""": True},
}
_lowerCamelCase : List[str] = {
"""facebook/dpr-question_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-question_encoder-multiset-base""": {"""do_lower_case""": True},
}
_lowerCamelCase : Tuple = {
"""facebook/dpr-reader-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-reader-multiset-base""": {"""do_lower_case""": True},
}
class UpperCamelCase_ ( UpperCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase__ = VOCAB_FILES_NAMES
UpperCAmelCase__ = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase__ = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase__ = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class UpperCamelCase_ ( UpperCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase__ = VOCAB_FILES_NAMES
UpperCAmelCase__ = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase__ = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase__ = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
_lowerCamelCase : List[str] = collections.namedtuple(
"""DPRSpanPrediction""", ["""span_score""", """relevance_score""", """doc_id""", """start_index""", """end_index""", """text"""]
)
_lowerCamelCase : str = collections.namedtuple("""DPRReaderOutput""", ["""start_logits""", """end_logits""", """relevance_logits"""])
_lowerCamelCase : Optional[int] = r"""
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
```
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
```
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
- `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
          is provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
- `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
of pairs) is provided.
- `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the first
sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the
second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `'tf'`: Return TensorFlow `tf.constant` objects.
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
Whether or not to return the attention mask. If not set, will return the attention mask according to the
specific tokenizer's default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Returns:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
"""
@add_start_docstrings(UpperCAmelCase__ )
class UpperCamelCase_ :
'''simple docstring'''
def __call__( self : Union[str, Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Optional[str] = None , UpperCAmelCase__ : Optional[str] = None , UpperCAmelCase__ : Union[bool, str] = False , UpperCAmelCase__ : Union[bool, str] = False , UpperCAmelCase__ : Optional[int] = None , UpperCAmelCase__ : Optional[Union[str, TensorType]] = None , UpperCAmelCase__ : Optional[bool] = None , **UpperCAmelCase__ : List[str] , ) ->BatchEncoding:
'''simple docstring'''
if titles is None and texts is None:
return super().__call__(
UpperCAmelCase__ , padding=UpperCAmelCase__ , truncation=UpperCAmelCase__ , max_length=UpperCAmelCase__ , return_tensors=UpperCAmelCase__ , return_attention_mask=UpperCAmelCase__ , **UpperCAmelCase__ , )
elif titles is None or texts is None:
A__ = titles if texts is None else texts
return super().__call__(
UpperCAmelCase__ , UpperCAmelCase__ , padding=UpperCAmelCase__ , truncation=UpperCAmelCase__ , max_length=UpperCAmelCase__ , return_tensors=UpperCAmelCase__ , return_attention_mask=UpperCAmelCase__ , **UpperCAmelCase__ , )
A__ = titles if not isinstance(UpperCAmelCase__ , UpperCAmelCase__) else [titles]
A__ = texts if not isinstance(UpperCAmelCase__ , UpperCAmelCase__) else [texts]
A__ = len(UpperCAmelCase__)
A__ = questions if not isinstance(UpperCAmelCase__ , UpperCAmelCase__) else [questions] * n_passages
if len(UpperCAmelCase__) != len(UpperCAmelCase__):
raise ValueError(
f"""There should be as many titles than texts but got {len(UpperCAmelCase__)} titles and {len(UpperCAmelCase__)} texts.""")
A__ = super().__call__(UpperCAmelCase__ , UpperCAmelCase__ , padding=UpperCAmelCase__ , truncation=UpperCAmelCase__)['''input_ids''']
A__ = super().__call__(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ , padding=UpperCAmelCase__ , truncation=UpperCAmelCase__)['''input_ids''']
A__ = {
'''input_ids''': [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(UpperCAmelCase__ , UpperCAmelCase__)
]
}
if return_attention_mask is not False:
A__ = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
A__ = attention_mask
return self.pad(UpperCAmelCase__ , padding=UpperCAmelCase__ , max_length=UpperCAmelCase__ , return_tensors=UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : Dict , UpperCAmelCase__ : BatchEncoding , UpperCAmelCase__ : DPRReaderOutput , UpperCAmelCase__ : int = 16 , UpperCAmelCase__ : int = 64 , UpperCAmelCase__ : int = 4 , ) ->List[DPRSpanPrediction]:
'''simple docstring'''
A__ = reader_input['''input_ids''']
A__ , A__ , A__ = reader_output[:3]
A__ = len(UpperCAmelCase__)
A__ = sorted(range(UpperCAmelCase__) , reverse=UpperCAmelCase__ , key=relevance_logits.__getitem__)
A__ = []
for doc_id in sorted_docs:
A__ = list(input_ids[doc_id])
# assuming question & title information is at the beginning of the sequence
A__ = sequence_ids.index(self.sep_token_id , 2) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
A__ = sequence_ids.index(self.pad_token_id)
else:
A__ = len(UpperCAmelCase__)
A__ = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=UpperCAmelCase__ , top_spans=UpperCAmelCase__ , )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=UpperCAmelCase__ , start_index=UpperCAmelCase__ , end_index=UpperCAmelCase__ , text=self.decode(sequence_ids[start_index : end_index + 1]) , ))
if len(UpperCAmelCase__) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def SCREAMING_SNAKE_CASE ( self : Dict , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : int , UpperCAmelCase__ : int , ) ->List[DPRSpanPrediction]:
'''simple docstring'''
A__ = []
for start_index, start_score in enumerate(UpperCAmelCase__):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
scores.append(((start_index, start_index + answer_length), start_score + end_score))
        A__ = sorted(UpperCAmelCase__ , key=lambda x: x[1] , reverse=UpperCAmelCase__)
A__ = []
for (start_index, end_index), score in scores:
if start_index > end_index:
raise ValueError(f"""Wrong span indices: [{start_index}:{end_index}]""")
A__ = end_index - start_index + 1
if length > max_answer_length:
raise ValueError(f"""Span is too long: {length} > {max_answer_length}""")
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals):
continue
chosen_span_intervals.append((start_index, end_index))
if len(UpperCAmelCase__) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(UpperCAmelCase__ )
class UpperCamelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase__ = VOCAB_FILES_NAMES
UpperCAmelCase__ = READER_PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase__ = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase__ = READER_PRETRAINED_INIT_CONFIGURATION
UpperCAmelCase__ = ['''input_ids''', '''attention_mask''']
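# --- Illustrative sketch (assumed logits; not part of the tokenizer above) ---
# A minimal, hedged version of the span-scoring idea in `_get_best_spans`:
# every (start, end) pair within `max_answer_length` is scored as
# start_logit + end_logit, then the pairs are ranked by that score.
start_logits = [0.1, 2.0, 0.3]
end_logits = [0.2, 0.5, 1.5]
max_answer_length = 2
scores = []
for start, start_score in enumerate(start_logits):
    for length, end_score in enumerate(end_logits[start : start + max_answer_length]):
        scores.append(((start, start + length), start_score + end_score))
scores.sort(key=lambda item: item[1], reverse=True)
print(scores[0])  # -> ((1, 2), 3.5)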
| 14 |
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
_lowerCamelCase : Optional[Any] = datasets.utils.logging.get_logger(__name__)
@dataclass
class UpperCamelCase_ ( datasets.BuilderConfig ):
'''simple docstring'''
UpperCAmelCase__ = None
UpperCAmelCase__ = "utf-8"
UpperCAmelCase__ = None
UpperCAmelCase__ = None
UpperCAmelCase__ = True # deprecated
UpperCAmelCase__ = None # deprecated
UpperCAmelCase__ = 10 << 20 # 10MB
UpperCAmelCase__ = None
class UpperCamelCase_ ( datasets.ArrowBasedBuilder ):
'''simple docstring'''
UpperCAmelCase__ = JsonConfig
def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->str:
'''simple docstring'''
if self.config.block_size is not None:
logger.warning('''The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead''')
A__ = self.config.block_size
if self.config.use_threads is not True:
logger.warning(
'''The JSON loader parameter `use_threads` is deprecated and doesn\'t have any effect anymore.''')
if self.config.newlines_in_values is not None:
raise ValueError('''The JSON loader parameter `newlines_in_values` is no longer supported''')
return datasets.DatasetInfo(features=self.config.features)
def SCREAMING_SNAKE_CASE ( self : List[str] , UpperCAmelCase__ : List[Any]) ->Dict:
'''simple docstring'''
if not self.config.data_files:
raise ValueError(f"""At least one data file must be specified, but got data_files={self.config.data_files}""")
A__ = dl_manager.download_and_extract(self.config.data_files)
if isinstance(UpperCAmelCase__ , (str, list, tuple)):
A__ = data_files
if isinstance(UpperCAmelCase__ , UpperCAmelCase__):
A__ = [files]
A__ = [dl_manager.iter_files(UpperCAmelCase__) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files})]
A__ = []
for split_name, files in data_files.items():
if isinstance(UpperCAmelCase__ , UpperCAmelCase__):
A__ = [files]
A__ = [dl_manager.iter_files(UpperCAmelCase__) for file in files]
splits.append(datasets.SplitGenerator(name=UpperCAmelCase__ , gen_kwargs={'''files''': files}))
return splits
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , UpperCAmelCase__ : pa.Table) ->pa.Table:
'''simple docstring'''
if self.config.features is not None:
# adding missing columns
for column_name in set(self.config.features) - set(pa_table.column_names):
A__ = self.config.features.arrow_schema.field(UpperCAmelCase__).type
A__ = pa_table.append_column(UpperCAmelCase__ , pa.array([None] * len(UpperCAmelCase__) , type=UpperCAmelCase__))
# more expensive cast to support nested structures with keys in a different order
# allows str <-> int/float or str to Audio for example
A__ = table_cast(UpperCAmelCase__ , self.config.features.arrow_schema)
return pa_table
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , UpperCAmelCase__ : Tuple) ->str:
'''simple docstring'''
for file_idx, file in enumerate(itertools.chain.from_iterable(UpperCAmelCase__)):
# If the file is one json object and if we need to look at the list of items in one specific field
if self.config.field is not None:
with open(UpperCAmelCase__ , encoding=self.config.encoding , errors=self.config.encoding_errors) as f:
A__ = json.load(UpperCAmelCase__)
# We keep only the field we are interested in
A__ = dataset[self.config.field]
                # We accept two formats: a list of dicts or a dict of lists
if isinstance(UpperCAmelCase__ , (list, tuple)):
A__ = set().union(*[row.keys() for row in dataset])
A__ = {col: [row.get(UpperCAmelCase__) for row in dataset] for col in keys}
else:
A__ = dataset
A__ = pa.Table.from_pydict(UpperCAmelCase__)
yield file_idx, self._cast_table(UpperCAmelCase__)
# If the file has one json object per line
else:
with open(UpperCAmelCase__ , '''rb''') as f:
A__ = 0
# Use block_size equal to the chunk size divided by 32 to leverage multithreading
# Set a default minimum value of 16kB if the chunk size is really small
A__ = max(self.config.chunksize // 32 , 16 << 10)
A__ = (
self.config.encoding_errors if self.config.encoding_errors is not None else '''strict'''
)
while True:
A__ = f.read(self.config.chunksize)
if not batch:
break
# Finish current line
try:
batch += f.readline()
except (AttributeError, io.UnsupportedOperation):
batch += readline(UpperCAmelCase__)
# PyArrow only accepts utf-8 encoded bytes
if self.config.encoding != "utf-8":
A__ = batch.decode(self.config.encoding , errors=UpperCAmelCase__).encode('''utf-8''')
try:
while True:
try:
A__ = paj.read_json(
io.BytesIO(UpperCAmelCase__) , read_options=paj.ReadOptions(block_size=UpperCAmelCase__))
break
except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
if (
isinstance(UpperCAmelCase__ , pa.ArrowInvalid)
and "straddling" not in str(UpperCAmelCase__)
or block_size > len(UpperCAmelCase__)
):
raise
else:
# Increase the block size in case it was too small.
# The block size will be reset for the next file.
logger.debug(
f"""Batch of {len(UpperCAmelCase__)} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.""")
block_size *= 2
except pa.ArrowInvalid as e:
try:
with open(
UpperCAmelCase__ , encoding=self.config.encoding , errors=self.config.encoding_errors) as f:
A__ = json.load(UpperCAmelCase__)
except json.JSONDecodeError:
logger.error(f"""Failed to read file '{file}' with error {type(UpperCAmelCase__)}: {e}""")
raise e
# If possible, parse the file as a list of json objects and exit the loop
if isinstance(UpperCAmelCase__ , UpperCAmelCase__): # list is the only sequence type supported in JSON
try:
A__ = set().union(*[row.keys() for row in dataset])
A__ = {col: [row.get(UpperCAmelCase__) for row in dataset] for col in keys}
A__ = pa.Table.from_pydict(UpperCAmelCase__)
except (pa.ArrowInvalid, AttributeError) as e:
logger.error(f"""Failed to read file '{file}' with error {type(UpperCAmelCase__)}: {e}""")
raise ValueError(f"""Not able to read records in the JSON file at {file}.""") from None
yield file_idx, self._cast_table(UpperCAmelCase__)
break
else:
logger.error(f"""Failed to read file '{file}' with error {type(UpperCAmelCase__)}: {e}""")
raise ValueError(
f"""Not able to read records in the JSON file at {file}. """
f"""You should probably indicate the field of the JSON file containing your records. """
f"""This JSON file contain the following fields: {str(list(dataset.keys()))}. """
f"""Select the correct one and provide it as `field='XXX'` to the dataset loading method. """) from None
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(UpperCAmelCase__)
batch_idx += 1
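# --- Illustrative sketch (assumed inputs; not part of the builder above) ---
# A minimal, hedged version of the retry pattern in the generator above:
# pyarrow's JSON reader raises ArrowInvalid with a "straddling" message when
# a record spans a block boundary, so the block size is doubled and the read
# retried until the record fits (or the block already exceeds the buffer).
import io
import pyarrow as pa
import pyarrow.json as paj

def read_json_bytes(raw: bytes, block_size: int = 16 << 10) -> pa.Table:
    while True:
        try:
            return paj.read_json(io.BytesIO(raw), read_options=paj.ReadOptions(block_size=block_size))
        except pa.ArrowInvalid as err:
            if "straddling" not in str(err) or block_size > len(raw):
                raise  # a genuine parse error, not a block-boundary issue
            block_size *= 2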
| 14 | 1 |
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from t5x import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
_lowerCamelCase : Union[str, Any] = {
"""/attention/""": """/0/SelfAttention/""",
"""/self_attention/""": """/0/SelfAttention/""",
"""/encoder_decoder_attention/""": """/1/EncDecAttention/""",
"""value""": """v""",
"""query""": """q""",
"""key""": """k""",
"""out""": """o""",
"""pre_self_attention_layer_norm""": """0/layer_norm""",
"""pre_cross_attention_layer_norm""": """1/layer_norm""",
"""pre_attention_layer_norm""": """0/layer_norm""", # previously 1, but seems wrong
"""token_embedder""": """shared""",
"""encoder_norm""": """final_layer_norm""",
"""decoder_norm""": """final_layer_norm""",
"""relpos_bias/rel_embedding""": """block/0/layer/0/SelfAttention/relative_attention_bias/weight""",
"""router/router_weights/w/""": """router/classifier/""",
"""roer/roer_weights/w/""": """router/classifier/""",
"""logits_dense""": """lm_head""",
}
def SCREAMING_SNAKE_CASE ( lowercase_ ) -> Any:
"""simple docstring"""
A__ = list(s_dict.keys() )
for key in keys:
A__ = R'''.*/layers_(\d+)'''
A__ = key
if re.match(lowercase_ , lowercase_ ):
A__ = re.sub(R'''layers_(\d+)''' , R'''block/\1/layer''' , lowercase_ )
A__ = R'''(encoder|decoder)\/'''
if re.match(lowercase_ , lowercase_ ):
A__ = re.match(lowercase_ , lowercase_ ).groups()
if groups[0] == "encoder":
A__ = re.sub(R'''/mlp/''' , R'''/1/mlp/''' , lowercase_ )
A__ = re.sub(R'''/pre_mlp_layer_norm/''' , R'''/1/layer_norm/''' , lowercase_ )
elif groups[0] == "decoder":
A__ = re.sub(R'''/mlp/''' , R'''/2/mlp/''' , lowercase_ )
A__ = re.sub(R'''/pre_mlp_layer_norm/''' , R'''/2/layer_norm/''' , lowercase_ )
# 2. Convert other classic mappings
for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
if old_key in new_key:
A__ = new_key.replace(lowercase_ , lowercase_ )
print(f"""{key} -> {new_key}""" )
A__ = s_dict.pop(lowercase_ )
if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
A__ = s_dict[
'''encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight'''
].T
if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
A__ = s_dict[
'''decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight'''
].T
# 3. Take extra care of the EXPERTS layer
for key in list(s_dict.keys() ):
if "expert" in key:
A__ = s_dict[key].shape[0]
A__ = s_dict[key]
for idx in range(lowercase_ ):
A__ = expert_weihts[idx]
print(f"""{key} -> {key.replace("expert/" , "nested fstring" )}""" )
s_dict.pop(lowercase_ )
return s_dict
_lowerCamelCase : Dict = {
"""NUM_ENCODER_LAYERS""": """num_layers""",
"""NUM_DECODER_LAYERS""": """num_decoder_layers""",
"""NUM_HEADS""": """num_heads""",
"""HEAD_DIM""": """d_kv""",
"""EMBED_DIM""": """d_model""",
"""MLP_DIM""": """d_ff""",
"""NUM_SELECTED_EXPERTS""": """num_selected_experts""",
"""NUM_ENCODER_SPARSE_LAYERS""": """num_sparse_encoder_layers""",
"""NUM_DECODER_SPARSE_LAYERS""": """num_sparse_decoder_layers""",
"""dense.MlpBlock.activations""": """feed_forward_proj""",
}
def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> Optional[Any]:
"""simple docstring"""
import regex as re
with open(lowercase_ , '''r''' ) as f:
A__ = f.read()
A__ = re.findall(R'''(.*) = ([0-9.]*)''' , lowercase_ )
A__ = {}
for param, value in regex_match:
if param in GIN_TO_CONFIG_MAPPING and value != "":
A__ = float(lowercase_ ) if '''.''' in value else int(lowercase_ )
A__ = re.findall(R'''(.*activations) = \(\'(.*)\',\)''' , lowercase_ )[0]
A__ = str(activation[1] )
A__ = num_experts
A__ = SwitchTransformersConfig(**lowercase_ )
return config
def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_=None , lowercase_="./" , lowercase_=8 ) -> int:
"""simple docstring"""
print(f"""Loading flax weights from : {flax_checkpoint_path}""" )
    A__ = checkpoints.load_t5x_checkpoint(lowercase_ )
if gin_file is not None:
A__ = convert_gin_to_config(lowercase_ , lowercase_ )
else:
A__ = SwitchTransformersConfig.from_pretrained(lowercase_ )
A__ = SwitchTransformersForConditionalGeneration(lowercase_ )
A__ = flax_params['''target''']
A__ = flatten_dict(lowercase_ , sep='''/''' )
A__ = rename_keys(lowercase_ )
A__ = unflatten_dict(lowercase_ , sep='''/''' )
# Load the flax params in the PT model
load_flax_weights_in_pytorch_model(lowercase_ , lowercase_ )
print(f"""Save PyTorch model to {pytorch_dump_path}""" )
pt_model.save_pretrained(lowercase_ )
if __name__ == "__main__":
_lowerCamelCase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--switch_t5x_checkpoint_path""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the"""
""" model architecture. If not provided, a `gin_file` has to be provided."""
),
)
parser.add_argument(
"""--gin_file""",
default=None,
type=str,
required=False,
help="""Path to the gin config file. If not provided, a `config_file` has to be passed """,
)
parser.add_argument(
"""--config_name""", default=None, type=str, required=False, help="""Config name of SwitchTransformers model."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output pytorch model."""
)
parser.add_argument("""--num_experts""", default=8, type=int, required=False, help="""Number of experts""")
_lowerCamelCase : Optional[int] = parser.parse_args()
convert_flax_checkpoint_to_pytorch(
        args.switch_t5x_checkpoint_path,
args.config_name,
args.gin_file,
args.pytorch_dump_folder_path,
args.num_experts,
)
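# --- Illustrative sketch (assumed key names; not part of the converter above) ---
# A minimal, hedged version of the layer-renaming step above: T5X-style
# paths like "encoder/layers_3/mlp/..." become HF-style
# "encoder/block/3/layer/..." paths via regex substitution, with the MLP
# sub-layer index depending on the stack (encoder -> 1, decoder -> 2).
import re

def rename_layer_key(key: str) -> str:
    new_key = re.sub(r"layers_(\d+)", r"block/\1/layer", key)
    stack = re.match(r"(encoder|decoder)/", new_key)
    if stack and stack.group(1) == "encoder":
        new_key = re.sub(r"/mlp/", r"/1/mlp/", new_key)
    elif stack and stack.group(1) == "decoder":
        new_key = re.sub(r"/mlp/", r"/2/mlp/", new_key)
    return new_key

print(rename_layer_key("encoder/layers_3/mlp/wi/kernel"))
# -> encoder/block/3/layer/1/mlp/wi/kernel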
| 14 |
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
_lowerCamelCase : List[Any] = """sshleifer/bart-tiny-random"""
_lowerCamelCase : List[Any] = """patrickvonplaten/t5-tiny-random"""
@require_torch
class UpperCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->int:
'''simple docstring'''
return AutoConfig.from_pretrained(UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : List[str]) ->Any:
'''simple docstring'''
A__ , *A__ = create_student_by_copying_alternating_layers(UpperCAmelCase__ , tempfile.mkdtemp() , e=1 , d=1)
self.assertEqual(student.config.num_hidden_layers , 1)
def SCREAMING_SNAKE_CASE ( self : int) ->Any:
'''simple docstring'''
A__ , *A__ = create_student_by_copying_alternating_layers(UpperCAmelCase__ , tempfile.mkdtemp() , e=1 , d=UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Union[str, Any]:
'''simple docstring'''
A__ , *A__ = create_student_by_copying_alternating_layers(UpperCAmelCase__ , tempfile.mkdtemp() , e=1 , d=UpperCAmelCase__)
self.assertEqual(student.config.encoder_layers , 1)
self.assertEqual(student.config.decoder_layers , self.teacher_config.encoder_layers)
def SCREAMING_SNAKE_CASE ( self : Dict) ->int:
'''simple docstring'''
A__ , *A__ = create_student_by_copying_alternating_layers(UpperCAmelCase__ , tempfile.mkdtemp() , e=1 , d=1)
self.assertEqual(student.config.encoder_layers , 1)
self.assertEqual(student.config.decoder_layers , 1)
def SCREAMING_SNAKE_CASE ( self : str) ->List[Any]:
'''simple docstring'''
with self.assertRaises(UpperCAmelCase__):
create_student_by_copying_alternating_layers(UpperCAmelCase__ , tempfile.mkdtemp() , e=UpperCAmelCase__ , d=UpperCAmelCase__)
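# --- Illustrative sketch (an assumption, not the actual make_student code) ---
# The tests above exercise building a shallow student from a teacher by
# copying a subset of its layers. A hedged sketch of the layer-selection
# idea: spread the student's layers evenly across the teacher's depth.
def pick_layers_to_copy(n_student: int, n_teacher: int) -> list:
    if n_student >= n_teacher:
        return list(range(n_teacher))
    step = n_teacher / n_student
    return [int(i * step) for i in range(n_student)]

print(pick_layers_to_copy(3, 12))  # -> [0, 4, 8]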
| 14 | 1 |
from typing import Dict, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
_lowerCamelCase : Optional[Any] = logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ ) -> Tuple:
"""simple docstring"""
return [
int(1_000 * (box[0] / width) ),
int(1_000 * (box[1] / height) ),
int(1_000 * (box[2] / width) ),
int(1_000 * (box[3] / height) ),
]
def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ = None ) -> Dict:
"""simple docstring"""
A__ = tesseract_config if tesseract_config is not None else ''''''
# apply OCR
A__ = to_pil_image(lowercase_ )
A__ , A__ = pil_image.size
A__ = pytesseract.image_to_data(lowercase_ , lang=lowercase_ , output_type='''dict''' , config=lowercase_ )
A__ , A__ , A__ , A__ , A__ = data['''text'''], data['''left'''], data['''top'''], data['''width'''], data['''height''']
# filter empty words and corresponding coordinates
A__ = [idx for idx, word in enumerate(lowercase_ ) if not word.strip()]
A__ = [word for idx, word in enumerate(lowercase_ ) if idx not in irrelevant_indices]
A__ = [coord for idx, coord in enumerate(lowercase_ ) if idx not in irrelevant_indices]
A__ = [coord for idx, coord in enumerate(lowercase_ ) if idx not in irrelevant_indices]
A__ = [coord for idx, coord in enumerate(lowercase_ ) if idx not in irrelevant_indices]
A__ = [coord for idx, coord in enumerate(lowercase_ ) if idx not in irrelevant_indices]
# turn coordinates into (left, top, left+width, top+height) format
A__ = []
for x, y, w, h in zip(lowercase_ , lowercase_ , lowercase_ , lowercase_ ):
A__ = [x, y, x + w, y + h]
actual_boxes.append(lowercase_ )
# finally, normalize the bounding boxes
A__ = []
for box in actual_boxes:
normalized_boxes.append(normalize_box(lowercase_ , lowercase_ , lowercase_ ) )
assert len(lowercase_ ) == len(lowercase_ ), "Not as many words as there are bounding boxes"
return words, normalized_boxes
class UpperCamelCase_ ( UpperCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase__ = ['''pixel_values''']
def __init__( self : Tuple , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : Dict[str, int] = None , UpperCAmelCase__ : PILImageResampling = PILImageResampling.BILINEAR , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : Optional[str] = None , UpperCAmelCase__ : Optional[str] = "" , **UpperCAmelCase__ : Any , ) ->None:
'''simple docstring'''
super().__init__(**UpperCAmelCase__)
A__ = size if size is not None else {'''height''': 224, '''width''': 224}
A__ = get_size_dict(UpperCAmelCase__)
A__ = do_resize
A__ = size
A__ = resample
A__ = apply_ocr
A__ = ocr_lang
A__ = tesseract_config
def SCREAMING_SNAKE_CASE ( self : List[str] , UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : Dict[str, int] , UpperCAmelCase__ : PILImageResampling = PILImageResampling.BILINEAR , UpperCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase__ : Optional[int] , ) ->np.ndarray:
'''simple docstring'''
A__ = get_size_dict(UpperCAmelCase__)
if "height" not in size or "width" not in size:
raise ValueError(f"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""")
A__ = (size['''height'''], size['''width'''])
return resize(UpperCAmelCase__ , size=UpperCAmelCase__ , resample=UpperCAmelCase__ , data_format=UpperCAmelCase__ , **UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , UpperCAmelCase__ : ImageInput , UpperCAmelCase__ : bool = None , UpperCAmelCase__ : Dict[str, int] = None , UpperCAmelCase__ : PILImageResampling = None , UpperCAmelCase__ : bool = None , UpperCAmelCase__ : Optional[str] = None , UpperCAmelCase__ : Optional[str] = None , UpperCAmelCase__ : Optional[Union[str, TensorType]] = None , UpperCAmelCase__ : ChannelDimension = ChannelDimension.FIRST , **UpperCAmelCase__ : Dict , ) ->PIL.Image.Image:
'''simple docstring'''
A__ = do_resize if do_resize is not None else self.do_resize
A__ = size if size is not None else self.size
A__ = get_size_dict(UpperCAmelCase__)
A__ = resample if resample is not None else self.resample
A__ = apply_ocr if apply_ocr is not None else self.apply_ocr
A__ = ocr_lang if ocr_lang is not None else self.ocr_lang
A__ = tesseract_config if tesseract_config is not None else self.tesseract_config
A__ = make_list_of_images(UpperCAmelCase__)
if not valid_images(UpperCAmelCase__):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''')
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''')
# All transformations expect numpy arrays.
A__ = [to_numpy_array(UpperCAmelCase__) for image in images]
if apply_ocr:
requires_backends(self , '''pytesseract''')
A__ = []
A__ = []
for image in images:
A__ , A__ = apply_tesseract(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__)
words_batch.append(UpperCAmelCase__)
boxes_batch.append(UpperCAmelCase__)
if do_resize:
A__ = [self.resize(image=UpperCAmelCase__ , size=UpperCAmelCase__ , resample=UpperCAmelCase__) for image in images]
# flip color channels from RGB to BGR (as Detectron2 requires this)
A__ = [flip_channel_order(UpperCAmelCase__) for image in images]
A__ = [to_channel_dimension_format(UpperCAmelCase__ , UpperCAmelCase__) for image in images]
A__ = BatchFeature(data={'''pixel_values''': images} , tensor_type=UpperCAmelCase__)
if apply_ocr:
A__ = words_batch
A__ = boxes_batch
return data
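# --- Illustrative worked example (assumed image size and box) ---
# `normalize_box` maps pixel boxes onto a resolution-independent 0-1000 grid:
# for a 600x400 page and box (left=150, top=40, right=300, bottom=80),
#   [int(1000 * 150/600), int(1000 * 40/400), int(1000 * 300/600), int(1000 * 80/400)]
#   -> [250, 100, 500, 200]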
| 14 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DeformableDetrImageProcessor
class UpperCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : List[str] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : int=7 , UpperCAmelCase__ : Dict=3 , UpperCAmelCase__ : List[Any]=30 , UpperCAmelCase__ : Any=400 , UpperCAmelCase__ : Optional[Any]=True , UpperCAmelCase__ : List[str]=None , UpperCAmelCase__ : Any=True , UpperCAmelCase__ : Optional[Any]=[0.5, 0.5, 0.5] , UpperCAmelCase__ : Any=[0.5, 0.5, 0.5] , UpperCAmelCase__ : List[str]=True , UpperCAmelCase__ : Optional[int]=1 / 255 , UpperCAmelCase__ : Optional[Any]=True , ) ->str:
'''simple docstring'''
A__ = size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 1_333}
A__ = parent
A__ = batch_size
A__ = num_channels
A__ = min_resolution
A__ = max_resolution
A__ = do_resize
A__ = size
A__ = do_normalize
A__ = image_mean
A__ = image_std
A__ = do_rescale
A__ = rescale_factor
A__ = do_pad
def SCREAMING_SNAKE_CASE ( self : Any) ->List[str]:
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : int=False) ->Optional[Any]:
'''simple docstring'''
if not batched:
A__ = image_inputs[0]
if isinstance(UpperCAmelCase__ , Image.Image):
A__ , A__ = image.size
else:
A__ , A__ = image.shape[1], image.shape[2]
if w < h:
A__ = int(self.size['''shortest_edge'''] * h / w)
A__ = self.size['''shortest_edge''']
elif w > h:
A__ = self.size['''shortest_edge''']
A__ = int(self.size['''shortest_edge'''] * w / h)
else:
A__ = self.size['''shortest_edge''']
A__ = self.size['''shortest_edge''']
else:
A__ = []
for image in image_inputs:
A__ , A__ = self.get_expected_values([image])
expected_values.append((expected_height, expected_width))
A__ = max(UpperCAmelCase__ , key=lambda UpperCAmelCase__: item[0])[0]
A__ = max(UpperCAmelCase__ , key=lambda UpperCAmelCase__: item[1])[1]
return expected_height, expected_width
@require_torch
@require_vision
class UpperCamelCase_ ( UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase__ = DeformableDetrImageProcessor if is_vision_available() else None
def SCREAMING_SNAKE_CASE ( self : List[str]) ->Tuple:
'''simple docstring'''
A__ = DeformableDetrImageProcessingTester(self)
@property
def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Any:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def SCREAMING_SNAKE_CASE ( self : List[str]) ->List[str]:
'''simple docstring'''
A__ = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(UpperCAmelCase__ , '''image_mean'''))
self.assertTrue(hasattr(UpperCAmelCase__ , '''image_std'''))
self.assertTrue(hasattr(UpperCAmelCase__ , '''do_normalize'''))
self.assertTrue(hasattr(UpperCAmelCase__ , '''do_resize'''))
self.assertTrue(hasattr(UpperCAmelCase__ , '''do_rescale'''))
self.assertTrue(hasattr(UpperCAmelCase__ , '''do_pad'''))
self.assertTrue(hasattr(UpperCAmelCase__ , '''size'''))
def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->int:
'''simple docstring'''
A__ = self.image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 1_333})
self.assertEqual(image_processor.do_pad , UpperCAmelCase__)
A__ = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=UpperCAmelCase__)
self.assertEqual(image_processor.size , {'''shortest_edge''': 42, '''longest_edge''': 84})
self.assertEqual(image_processor.do_pad , UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : Any) ->List[str]:
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Dict:
'''simple docstring'''
A__ = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__)
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase__ , Image.Image)
# Test not batched input
A__ = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values
A__ , A__ = self.image_processor_tester.get_expected_values(UpperCAmelCase__)
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A__ , A__ = self.image_processor_tester.get_expected_values(UpperCAmelCase__ , batched=UpperCAmelCase__)
A__ = image_processing(UpperCAmelCase__ , return_tensors='''pt''').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def SCREAMING_SNAKE_CASE ( self : int) ->Optional[int]:
'''simple docstring'''
A__ = self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ , numpify=UpperCAmelCase__)
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase__ , np.ndarray)
# Test not batched input
A__ = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values
A__ , A__ = self.image_processor_tester.get_expected_values(UpperCAmelCase__)
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A__ = image_processing(UpperCAmelCase__ , return_tensors='''pt''').pixel_values
A__ , A__ = self.image_processor_tester.get_expected_values(UpperCAmelCase__ , batched=UpperCAmelCase__)
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def SCREAMING_SNAKE_CASE ( self : int) ->Tuple:
'''simple docstring'''
A__ = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ , torchify=UpperCAmelCase__)
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase__ , torch.Tensor)
# Test not batched input
A__ = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values
A__ , A__ = self.image_processor_tester.get_expected_values(UpperCAmelCase__)
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A__ = image_processing(UpperCAmelCase__ , return_tensors='''pt''').pixel_values
A__ , A__ = self.image_processor_tester.get_expected_values(UpperCAmelCase__ , batched=UpperCAmelCase__)
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->List[str]:
'''simple docstring'''
A__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''') as f:
A__ = json.loads(f.read())
A__ = {'''image_id''': 39_769, '''annotations''': target}
# encode them
A__ = DeformableDetrImageProcessor()
A__ = image_processing(images=UpperCAmelCase__ , annotations=UpperCAmelCase__ , return_tensors='''pt''')
# verify pixel values
A__ = torch.Size([1, 3, 800, 1_066])
self.assertEqual(encoding['''pixel_values'''].shape , UpperCAmelCase__)
A__ = torch.tensor([0.2796, 0.3138, 0.3481])
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , UpperCAmelCase__ , atol=1e-4))
# verify area
A__ = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , UpperCAmelCase__))
# verify boxes
A__ = torch.Size([6, 4])
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , UpperCAmelCase__)
A__ = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , UpperCAmelCase__ , atol=1e-3))
# verify image_id
A__ = torch.tensor([39_769])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , UpperCAmelCase__))
# verify is_crowd
A__ = torch.tensor([0, 0, 0, 0, 0, 0])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , UpperCAmelCase__))
# verify class_labels
A__ = torch.tensor([75, 75, 63, 65, 17, 17])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , UpperCAmelCase__))
# verify orig_size
A__ = torch.tensor([480, 640])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , UpperCAmelCase__))
# verify size
A__ = torch.tensor([800, 1_066])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , UpperCAmelCase__))
@slow
def SCREAMING_SNAKE_CASE ( self : Dict) ->Optional[int]:
'''simple docstring'''
A__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''') as f:
A__ = json.loads(f.read())
A__ = {'''file_name''': '''000000039769.png''', '''image_id''': 39_769, '''segments_info''': target}
A__ = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''')
# encode them
A__ = DeformableDetrImageProcessor(format='''coco_panoptic''')
A__ = image_processing(images=UpperCAmelCase__ , annotations=UpperCAmelCase__ , masks_path=UpperCAmelCase__ , return_tensors='''pt''')
# verify pixel values
A__ = torch.Size([1, 3, 800, 1_066])
self.assertEqual(encoding['''pixel_values'''].shape , UpperCAmelCase__)
A__ = torch.tensor([0.2796, 0.3138, 0.3481])
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , UpperCAmelCase__ , atol=1e-4))
# verify area
A__ = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , UpperCAmelCase__))
# verify boxes
A__ = torch.Size([6, 4])
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , UpperCAmelCase__)
A__ = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , UpperCAmelCase__ , atol=1e-3))
# verify image_id
A__ = torch.tensor([39_769])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , UpperCAmelCase__))
# verify is_crowd
A__ = torch.tensor([0, 0, 0, 0, 0, 0])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , UpperCAmelCase__))
# verify class_labels
A__ = torch.tensor([17, 17, 63, 75, 75, 93])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , UpperCAmelCase__))
# verify masks
A__ = 822_873
self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , UpperCAmelCase__)
# verify orig_size
A__ = torch.tensor([480, 640])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , UpperCAmelCase__))
# verify size
A__ = torch.tensor([800, 1_066])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , UpperCAmelCase__))
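# --- Illustrative sketch (assumed input size; not part of the tests above) ---
# A minimal, hedged version of the shortest-edge resize rule that
# `get_expected_values` reproduces: scale the shorter side to
# `shortest_edge` and keep the aspect ratio on the longer side.
def shortest_edge_resize(height: int, width: int, shortest_edge: int) -> tuple:
    if width < height:
        return int(shortest_edge * height / width), shortest_edge
    if width > height:
        return shortest_edge, int(shortest_edge * width / height)
    return shortest_edge, shortest_edge

print(shortest_edge_resize(480, 640, 18))  # -> (18, 24)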
| 14 | 1 |
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class UpperCamelCase_ ( UpperCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase__ = (DDIMParallelScheduler,)
UpperCAmelCase__ = (('''eta''', 0.0), ('''num_inference_steps''', 50))
def SCREAMING_SNAKE_CASE ( self : str , **UpperCAmelCase__ : str) ->List[str]:
'''simple docstring'''
A__ = {
'''num_train_timesteps''': 1_000,
'''beta_start''': 0.0001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
'''clip_sample''': True,
}
config.update(**UpperCAmelCase__)
return config
def SCREAMING_SNAKE_CASE ( self : int , **UpperCAmelCase__ : List[Any]) ->Tuple:
'''simple docstring'''
A__ = self.scheduler_classes[0]
A__ = self.get_scheduler_config(**UpperCAmelCase__)
A__ = scheduler_class(**UpperCAmelCase__)
A__ , A__ = 10, 0.0
A__ = self.dummy_model()
A__ = self.dummy_sample_deter
scheduler.set_timesteps(UpperCAmelCase__)
for t in scheduler.timesteps:
A__ = model(UpperCAmelCase__ , UpperCAmelCase__)
A__ = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__).prev_sample
return sample
def SCREAMING_SNAKE_CASE ( self : List[str]) ->List[Any]:
'''simple docstring'''
for timesteps in [100, 500, 1_000]:
self.check_over_configs(num_train_timesteps=UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Tuple:
'''simple docstring'''
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=UpperCAmelCase__)
A__ = self.scheduler_classes[0]
A__ = self.get_scheduler_config(steps_offset=1)
A__ = scheduler_class(**UpperCAmelCase__)
scheduler.set_timesteps(5)
assert torch.equal(scheduler.timesteps , torch.LongTensor([801, 601, 401, 201, 1]))
def SCREAMING_SNAKE_CASE ( self : str) ->int:
'''simple docstring'''
for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2]):
self.check_over_configs(beta_start=UpperCAmelCase__ , beta_end=UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : Tuple) ->int:
'''simple docstring'''
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : int) ->Optional[int]:
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : List[Any]) ->List[Any]:
'''simple docstring'''
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Union[str, Any]:
'''simple docstring'''
for timestep_spacing in ["trailing", "leading"]:
self.check_over_configs(timestep_spacing=UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Optional[int]:
'''simple docstring'''
for rescale_betas_zero_snr in [True, False]:
self.check_over_configs(rescale_betas_zero_snr=UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : Tuple) ->Union[str, Any]:
'''simple docstring'''
self.check_over_configs(thresholding=UpperCAmelCase__)
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(
thresholding=UpperCAmelCase__ , prediction_type=UpperCAmelCase__ , sample_max_value=UpperCAmelCase__ , )
def SCREAMING_SNAKE_CASE ( self : Tuple) ->Optional[int]:
'''simple docstring'''
for t in [1, 10, 49]:
self.check_over_forward(time_step=UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : Tuple) ->List[str]:
'''simple docstring'''
for t, num_inference_steps in zip([1, 10, 50] , [10, 50, 500]):
self.check_over_forward(time_step=UpperCAmelCase__ , num_inference_steps=UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Optional[int]:
'''simple docstring'''
for t, eta in zip([1, 10, 49] , [0.0, 0.5, 1.0]):
self.check_over_forward(time_step=UpperCAmelCase__ , eta=UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : List[Any]) ->Dict:
'''simple docstring'''
A__ = self.scheduler_classes[0]
A__ = self.get_scheduler_config()
A__ = scheduler_class(**UpperCAmelCase__)
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0) - 0.0)) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(420 , 400) - 0.14771)) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(980 , 960) - 0.32460)) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0) - 0.0)) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 , 486) - 0.00979)) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 , 998) - 0.02)) < 1e-5
def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Dict:
'''simple docstring'''
A__ = self.scheduler_classes[0]
A__ = self.get_scheduler_config()
A__ = scheduler_class(**UpperCAmelCase__)
A__ , A__ = 10, 0.0
scheduler.set_timesteps(UpperCAmelCase__)
A__ = self.dummy_model()
A__ = self.dummy_sample_deter
A__ = self.dummy_sample_deter + 0.1
A__ = self.dummy_sample_deter - 0.1
A__ = samplea.shape[0]
A__ = torch.stack([samplea, samplea, samplea] , dim=0)
A__ = torch.arange(UpperCAmelCase__)[0:3, None].repeat(1 , UpperCAmelCase__)
A__ = model(samples.flatten(0 , 1) , timesteps.flatten(0 , 1))
A__ = scheduler.batch_step_no_noise(UpperCAmelCase__ , timesteps.flatten(0 , 1) , samples.flatten(0 , 1) , UpperCAmelCase__)
A__ = torch.sum(torch.abs(UpperCAmelCase__))
A__ = torch.mean(torch.abs(UpperCAmelCase__))
assert abs(result_sum.item() - 1147.7904) < 1e-2
assert abs(result_mean.item() - 0.4982) < 1e-3
def SCREAMING_SNAKE_CASE ( self : List[Any]) ->Optional[Any]:
'''simple docstring'''
A__ = self.full_loop()
A__ = torch.sum(torch.abs(UpperCAmelCase__))
A__ = torch.mean(torch.abs(UpperCAmelCase__))
assert abs(result_sum.item() - 172.0067) < 1e-2
assert abs(result_mean.item() - 0.223967) < 1e-3
def SCREAMING_SNAKE_CASE ( self : Any) ->List[Any]:
'''simple docstring'''
A__ = self.full_loop(prediction_type='''v_prediction''')
A__ = torch.sum(torch.abs(UpperCAmelCase__))
A__ = torch.mean(torch.abs(UpperCAmelCase__))
assert abs(result_sum.item() - 52.5302) < 1e-2
assert abs(result_mean.item() - 0.0684) < 1e-3
def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Dict:
'''simple docstring'''
A__ = self.full_loop(set_alpha_to_one=UpperCAmelCase__ , beta_start=0.01)
A__ = torch.sum(torch.abs(UpperCAmelCase__))
A__ = torch.mean(torch.abs(UpperCAmelCase__))
assert abs(result_sum.item() - 149.8295) < 1e-2
assert abs(result_mean.item() - 0.1951) < 1e-3
def SCREAMING_SNAKE_CASE ( self : List[Any]) ->Optional[int]:
'''simple docstring'''
A__ = self.full_loop(set_alpha_to_one=UpperCAmelCase__ , beta_start=0.01)
A__ = torch.sum(torch.abs(UpperCAmelCase__))
A__ = torch.mean(torch.abs(UpperCAmelCase__))
assert abs(result_sum.item() - 149.0784) < 1e-2
assert abs(result_mean.item() - 0.1941) < 1e-3
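# --- Illustrative sketch (dummy model; not part of the tests above) ---
# A minimal, hedged version of the sampling loop that `full_loop` builds:
# predict a residual at each timestep and step the scheduler. The zero
# residual below is a stand-in for a real denoising model.
import torch
from diffusers import DDIMParallelScheduler

scheduler = DDIMParallelScheduler(num_train_timesteps=1_000)
scheduler.set_timesteps(10)
sample = torch.randn(1, 3, 8, 8)
for t in scheduler.timesteps:
    residual = torch.zeros_like(sample)  # stand-in for model(sample, t)
    sample = scheduler.step(residual, t, sample, eta=0.0).prev_sample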
| 14 |
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
_lowerCamelCase : str = typing.Union[Iterable[float], Iterable[int], np.ndarray] # noqa: UP007
_lowerCamelCase : Tuple = typing.Union[np.float64, int, float] # noqa: UP007
def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> VectorOut:
"""simple docstring"""
return np.sqrt(np.sum((np.asarray(lowercase_ ) - np.asarray(lowercase_ )) ** 2 ) )
def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> VectorOut:
"""simple docstring"""
return sum((va - va) ** 2 for va, va in zip(lowercase_ , lowercase_ ) ) ** (1 / 2)
if __name__ == "__main__":
def SCREAMING_SNAKE_CASE ( ) -> None:
"""simple docstring"""
from timeit import timeit
print('''Without Numpy''' )
print(
timeit(
'''euclidean_distance_no_np([1, 2, 3], [4, 5, 6])''' , number=10_000 , globals=globals() , ) )
print('''With Numpy''' )
print(
timeit(
'''euclidean_distance([1, 2, 3], [4, 5, 6])''' , number=10_000 , globals=globals() , ) )
benchmark()
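# --- Illustrative worked example (assumed inputs) ---
# For va = [1, 2, 3] and vb = [4, 5, 6]:
#   sqrt((4-1)**2 + (5-2)**2 + (6-3)**2) = sqrt(27) ≈ 5.196152
# Both implementations above should agree on this value.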
| 14 | 1 |
import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
_lowerCamelCase : Optional[Any] = [
# tf -> hf
("""/""", """."""),
("""layer_""", """layers."""),
("""kernel""", """weight"""),
("""beta""", """bias"""),
("""gamma""", """weight"""),
("""pegasus""", """model"""),
]
_lowerCamelCase : Optional[int] = [
(""".output.dense""", """.fc2"""),
("""intermediate.LayerNorm""", """final_layer_norm"""),
("""intermediate.dense""", """fc1"""),
]
_lowerCamelCase : List[Any] = (
INIT_COMMON
+ [
("""attention.self.LayerNorm""", """self_attn_layer_norm"""),
("""attention.output.dense""", """self_attn.out_proj"""),
("""attention.self""", """self_attn"""),
("""attention.encdec.LayerNorm""", """encoder_attn_layer_norm"""),
("""attention.encdec_output.dense""", """encoder_attn.out_proj"""),
("""attention.encdec""", """encoder_attn"""),
("""key""", """k_proj"""),
("""value""", """v_proj"""),
("""query""", """q_proj"""),
("""decoder.LayerNorm""", """decoder.layernorm_embedding"""),
]
+ END_COMMON
)
_lowerCamelCase : Optional[int] = (
INIT_COMMON
+ [
("""embeddings.word_embeddings""", """shared.weight"""),
("""embeddings.position_embeddings""", """embed_positions.weight"""),
("""attention.self.LayerNorm""", """self_attn_layer_norm"""),
("""attention.output.dense""", """self_attn.output"""),
("""attention.self""", """self_attn.self"""),
("""encoder.LayerNorm""", """encoder.layernorm_embedding"""),
]
+ END_COMMON
)
_lowerCamelCase : Dict = [
"""encdec/key/bias""",
"""encdec/query/bias""",
"""encdec/value/bias""",
"""self/key/bias""",
"""self/query/bias""",
"""self/value/bias""",
"""encdec_output/dense/bias""",
"""attention/output/dense/bias""",
]
def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> List[Any]:
"""simple docstring"""
for tf_name, hf_name in patterns:
A__ = k.replace(lowercase_ , lowercase_ )
return k
def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> BigBirdPegasusForConditionalGeneration:
"""simple docstring"""
A__ = BigBirdPegasusConfig(**lowercase_ )
A__ = BigBirdPegasusForConditionalGeneration(lowercase_ )
A__ = torch_model.state_dict()
A__ = {}
# separating decoder weights
A__ = {k: tf_weights[k] for k in tf_weights if k.startswith('''pegasus/decoder''' )}
A__ = {k: tf_weights[k] for k in tf_weights if not k.startswith('''pegasus/decoder''' )}
for k, v in tqdm(decoder_weights.items() , '''tf -> hf conversion''' ):
A__ = [k.endswith(lowercase_ ) for ending in KEYS_TO_IGNORE]
if any(lowercase_ ):
continue
A__ = DECODER_PATTERNS
A__ = rename_state_dict_key(lowercase_ , lowercase_ )
if new_k not in state_dict:
raise ValueError(f"""could not find new key {new_k} in state dict. (converted from {k})""" )
        if any(i in k for i in ['''dense''', '''query''', '''key''', '''value''']):
A__ = v.T
A__ = torch.from_numpy(lowercase_ )
assert v.shape == state_dict[new_k].shape, f"""{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"""
for k, v in tqdm(remaining_weights.items() , '''tf -> hf conversion''' ):
A__ = [k.endswith(lowercase_ ) for ending in KEYS_TO_IGNORE]
if any(lowercase_ ):
continue
A__ = REMAINING_PATTERNS
A__ = rename_state_dict_key(lowercase_ , lowercase_ )
if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
raise ValueError(f"""could not find new key {new_k} in state dict. (converted from {k})""" )
        if any(i in k for i in ['''dense''', '''query''', '''key''', '''value''']):
A__ = v.T
A__ = torch.from_numpy(lowercase_ )
if k != "pegasus/embeddings/position_embeddings":
assert v.shape == state_dict[new_k].shape, f"""{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"""
A__ = mapping['''model.embed_positions.weight''']
A__ = mapping.pop('''model.embed_positions.weight''' )
A__ , A__ = torch_model.load_state_dict(lowercase_ , strict=lowercase_ )
A__ = [
k
for k in missing
if k
not in [
'''final_logits_bias''',
'''model.encoder.embed_tokens.weight''',
'''model.decoder.embed_tokens.weight''',
'''lm_head.weight''',
]
]
assert unexpected_missing == [], f"""no matches found for the following torch keys {unexpected_missing}"""
assert extra == [], f"""no matches found for the following tf keys {extra}"""
return torch_model
def SCREAMING_SNAKE_CASE ( lowercase_ ) -> Dict:
"""simple docstring"""
A__ = tf.train.list_variables(lowercase_ )
A__ = {}
A__ = ['''global_step''']
for name, shape in tqdm(lowercase_ , desc='''converting tf checkpoint to dict''' ):
A__ = any(pat in name for pat in ignore_name )
if skip_key:
continue
A__ = tf.train.load_variable(lowercase_ , lowercase_ )
A__ = array
return tf_weights
def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ ) -> Any:
"""simple docstring"""
A__ = get_tf_weights_as_numpy(lowercase_ )
A__ = convert_bigbird_pegasus(lowercase_ , lowercase_ )
torch_model.save_pretrained(lowercase_ )
if __name__ == "__main__":
_lowerCamelCase : Tuple = argparse.ArgumentParser()
parser.add_argument("""--tf_ckpt_path""", type=str, help="""passed to tf.train.list_variables""")
parser.add_argument("""--save_dir""", default=None, type=str, help="""Path to the output PyTorch model.""")
_lowerCamelCase : int = parser.parse_args()
_lowerCamelCase : List[Any] = {}
convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
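# --- Illustrative sketch (assumed key; not part of the converter above) ---
# A minimal, hedged example of `rename_state_dict_key`: each (tf, hf)
# pattern pair is applied in order with str.replace.
patterns = [("/", "."), ("layer_", "layers."), ("kernel", "weight")]
key = "encoder/layer_0/attention/kernel"
for tf_name, hf_name in patterns:
    key = key.replace(tf_name, hf_name)
print(key)  # -> encoder.layers.0.attention.weight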
| 14 |
from ...processing_utils import ProcessorMixin
class SpeechT5Processor(ProcessorMixin):
    """Constructs a SpeechT5 processor which wraps a feature extractor and a tokenizer into a single processor."""

    feature_extractor_class = "SpeechT5FeatureExtractor"
    tokenizer_class = "SpeechT5Tokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)

    def __call__(self, *args, **kwargs):
        """Processes audio and text inputs, as well as audio and text targets."""
        audio = kwargs.pop("audio", None)
        text = kwargs.pop("text", None)
        text_target = kwargs.pop("text_target", None)
        audio_target = kwargs.pop("audio_target", None)
        sampling_rate = kwargs.pop("sampling_rate", None)

        if audio is not None and text is not None:
            raise ValueError(
                "Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?")
        if audio_target is not None and text_target is not None:
            raise ValueError(
                "Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?")
        if audio is None and audio_target is None and text is None and text_target is None:
            raise ValueError(
                "You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        elif text is not None:
            inputs = self.tokenizer(text, **kwargs)
        else:
            inputs = None

        if audio_target is not None:
            targets = self.feature_extractor(audio_target=audio_target, *args, sampling_rate=sampling_rate, **kwargs)
            labels = targets["input_values"]
        elif text_target is not None:
            targets = self.tokenizer(text_target, **kwargs)
            labels = targets["input_ids"]
        else:
            targets = None

        if inputs is None:
            return targets

        if targets is not None:
            inputs["labels"] = labels
            decoder_attention_mask = targets.get("attention_mask")
            if decoder_attention_mask is not None:
                inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs

    def pad(self, *args, **kwargs):
        """Collates audio and text inputs, as well as their targets, into a padded batch."""
        input_values = kwargs.pop("input_values", None)
        input_ids = kwargs.pop("input_ids", None)
        labels = kwargs.pop("labels", None)

        if input_values is not None and input_ids is not None:
            raise ValueError("Cannot process both `input_values` and `input_ids` inputs.")
        if input_values is None and input_ids is None and labels is None:
            raise ValueError(
                "You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded.")

        if input_values is not None:
            inputs = self.feature_extractor.pad(input_values, *args, **kwargs)
        elif input_ids is not None:
            inputs = self.tokenizer.pad(input_ids, **kwargs)
        else:
            inputs = None

        if labels is not None:
            if "input_ids" in labels or (isinstance(labels, list) and "input_ids" in labels[0]):
                targets = self.tokenizer.pad(labels, **kwargs)
                labels = targets["input_ids"]
            else:
                # temporarily swap feature_size so the mel-spectrogram targets are padded correctly
                feature_size_hack = self.feature_extractor.feature_size
                self.feature_extractor.feature_size = self.feature_extractor.num_mel_bins
                targets = self.feature_extractor.pad(labels, *args, **kwargs)
                self.feature_extractor.feature_size = feature_size_hack
                labels = targets["input_values"]
        else:
            targets = None

        if inputs is None:
            return targets

        if targets is not None:
            inputs["labels"] = labels
            decoder_attention_mask = targets.get("attention_mask")
            if decoder_attention_mask is not None:
                inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs

    def batch_decode(self, *args, **kwargs):
        """Forwards all arguments to SpeechT5Tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forwards all arguments to SpeechT5Tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)
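# Usage sketch (not part of the original file): the processor routes `audio`/`text` to the
# feature extractor/tokenizer and `*_target` inputs to the label side. The checkpoint name
# below is an assumption; substitute your own.
#   processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_asr")
#   batch = processor(audio=waveform, sampling_rate=16000, text_target="a transcript", return_tensors="pt")
#   # batch now holds "input_values", "labels" and "decoder_attention_mask"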
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BridgeTowerProcessor(ProcessorMixin):
    """Constructs a BridgeTower processor which wraps an image processor and a tokenizer into a single processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BridgeTowerImageProcessor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        encoding = self.tokenizer(
            text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs, )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(
            images, return_tensors=return_tensors, do_normalize=True, do_center_crop=True, **kwargs)
        encoding.update(encoding_image_processor)
        return encoding

    def batch_decode(self, *args, **kwargs):
        """Forwards all arguments to RobertaTokenizerFast's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forwards all arguments to RobertaTokenizerFast's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
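# Usage sketch (illustrative, not from the original file; the checkpoint name is an assumption):
#   processor = BridgeTowerProcessor.from_pretrained("BridgeTower/bridgetower-base")
#   encoding = processor(images=pil_image, text="a caption", return_tensors="pt")
#   # encoding combines the tokenizer outputs with "pixel_values" (and "pixel_mask") from the image processor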
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/git-base": "https://huggingface.co/microsoft/git-base/resolve/main/config.json",
}


class GitVisionConfig(PretrainedConfig):
    """Configuration for the vision encoder of a GIT model."""

    model_type = "git_vision_model"

    def __init__(
        self, hidden_size=768, intermediate_size=3072, num_hidden_layers=12, num_attention_heads=12, num_channels=3, image_size=224, patch_size=16, hidden_act="quick_gelu", layer_norm_eps=1e-5, attention_dropout=0.0, initializer_range=0.02, **kwargs, ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from GITConfig
        if config_dict.get("model_type") == "git":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors.")

        return cls.from_dict(config_dict, **kwargs)


class GitConfig(PretrainedConfig):
    """Configuration for a GIT model, which pairs a text decoder with a GitVisionConfig."""

    model_type = "git"

    def __init__(
        self, vision_config=None, vocab_size=30_522, hidden_size=768, num_hidden_layers=6, num_attention_heads=12, intermediate_size=3_072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=1_024, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type="absolute", use_cache=True, tie_word_embeddings=False, bos_token_id=101, eos_token_id=102, num_image_with_embedding=None, **kwargs, ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, pad_token_id=pad_token_id, **kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the GitVisionConfig with default values.")

        self.vision_config = GitVisionConfig(**vision_config)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.num_image_with_embedding = num_image_with_embedding

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

    def to_dict(self):
        """Serializes this instance to a Python dictionary, inlining the nested vision config."""
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
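# Minimal sketch of the nested-config round trip (assumes the defaults above):
#   config = GitConfig()          # builds a GitVisionConfig internally
#   d = config.to_dict()          # nested vision config serialized as a plain dict
#   assert d["model_type"] == "git" and isinstance(d["vision_config"], dict)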
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DeformableDetrImageProcessor
class DeformableDetrImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], do_rescale=True, rescale_factor=1 / 255, do_pad=True):
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1_333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """Computes the expected height and width after resizing with the `shortest_edge` rule."""
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
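    # Worked example for get_expected_values (single image, defaults above): a PIL image of
    # width 30 and height 60 has w < h, so expected_width = 18 and
    # expected_height = int(18 * 60 / 30) = 36.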
@require_torch
@require_vision
class DeformableDetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DeformableDetrImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DeformableDetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "size"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1_333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False)
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
A__ = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__)
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase__ , Image.Image)
# Test not batched input
A__ = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values
A__ , A__ = self.image_processor_tester.get_expected_values(UpperCAmelCase__)
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A__ , A__ = self.image_processor_tester.get_expected_values(UpperCAmelCase__ , batched=UpperCAmelCase__)
A__ = image_processing(UpperCAmelCase__ , return_tensors='''pt''').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
    def test_call_numpy(self):
A__ = self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ , numpify=UpperCAmelCase__)
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase__ , np.ndarray)
# Test not batched input
A__ = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values
A__ , A__ = self.image_processor_tester.get_expected_values(UpperCAmelCase__)
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A__ = image_processing(UpperCAmelCase__ , return_tensors='''pt''').pixel_values
A__ , A__ = self.image_processor_tester.get_expected_values(UpperCAmelCase__ , batched=UpperCAmelCase__)
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
    def test_call_pytorch(self):
A__ = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ , torchify=UpperCAmelCase__)
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase__ , torch.Tensor)
# Test not batched input
A__ = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values
A__ , A__ = self.image_processor_tester.get_expected_values(UpperCAmelCase__)
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A__ = image_processing(UpperCAmelCase__ , return_tensors='''pt''').pixel_values
A__ , A__ = self.image_processor_tester.get_expected_values(UpperCAmelCase__ , batched=UpperCAmelCase__)
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
    def test_call_pytorch_with_coco_detection_annotations(self):
A__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''') as f:
A__ = json.loads(f.read())
A__ = {'''image_id''': 39_769, '''annotations''': target}
# encode them
A__ = DeformableDetrImageProcessor()
A__ = image_processing(images=UpperCAmelCase__ , annotations=UpperCAmelCase__ , return_tensors='''pt''')
# verify pixel values
A__ = torch.Size([1, 3, 800, 1_066])
self.assertEqual(encoding['''pixel_values'''].shape , UpperCAmelCase__)
A__ = torch.tensor([0.2796, 0.3138, 0.3481])
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , UpperCAmelCase__ , atol=1e-4))
# verify area
A__ = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , UpperCAmelCase__))
# verify boxes
A__ = torch.Size([6, 4])
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , UpperCAmelCase__)
A__ = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , UpperCAmelCase__ , atol=1e-3))
# verify image_id
A__ = torch.tensor([39_769])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , UpperCAmelCase__))
# verify is_crowd
A__ = torch.tensor([0, 0, 0, 0, 0, 0])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , UpperCAmelCase__))
# verify class_labels
A__ = torch.tensor([75, 75, 63, 65, 17, 17])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , UpperCAmelCase__))
# verify orig_size
A__ = torch.tensor([480, 640])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , UpperCAmelCase__))
# verify size
A__ = torch.tensor([800, 1_066])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , UpperCAmelCase__))
@slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
A__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''') as f:
A__ = json.loads(f.read())
A__ = {'''file_name''': '''000000039769.png''', '''image_id''': 39_769, '''segments_info''': target}
A__ = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''')
# encode them
A__ = DeformableDetrImageProcessor(format='''coco_panoptic''')
A__ = image_processing(images=UpperCAmelCase__ , annotations=UpperCAmelCase__ , masks_path=UpperCAmelCase__ , return_tensors='''pt''')
# verify pixel values
A__ = torch.Size([1, 3, 800, 1_066])
self.assertEqual(encoding['''pixel_values'''].shape , UpperCAmelCase__)
A__ = torch.tensor([0.2796, 0.3138, 0.3481])
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , UpperCAmelCase__ , atol=1e-4))
# verify area
A__ = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , UpperCAmelCase__))
# verify boxes
A__ = torch.Size([6, 4])
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , UpperCAmelCase__)
A__ = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , UpperCAmelCase__ , atol=1e-3))
# verify image_id
A__ = torch.tensor([39_769])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , UpperCAmelCase__))
# verify is_crowd
A__ = torch.tensor([0, 0, 0, 0, 0, 0])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , UpperCAmelCase__))
# verify class_labels
A__ = torch.tensor([17, 17, 63, 75, 75, 93])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , UpperCAmelCase__))
# verify masks
A__ = 822_873
self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , UpperCAmelCase__)
# verify orig_size
A__ = torch.tensor([480, 640])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , UpperCAmelCase__))
# verify size
A__ = torch.tensor([800, 1_066])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , UpperCAmelCase__))
import requests
from bs4 import BeautifulSoup


def get_citation(base_url: str, params: dict) -> str:
    """Return the "Cited by" count that Google Scholar reports for the given lookup."""
    soup = BeautifulSoup(requests.get(base_url, params=params).content, "html.parser")
    div = soup.find("div", attrs={"class": "gs_ri"})
    anchors = div.find("div", attrs={"class": "gs_fl"}).find_all("a")
    return anchors[2].get_text()


if __name__ == "__main__":
    params = {
        "title": (
            "Precisely geometry controlled microsupercapacitors for ultrahigh areal "
            "capacitance, volumetric capacitance, and energy density"
        ),
        "journal": "Chem. Mater.",
        "volume": 30,
        "pages": "3979-3990",
        "year": 2018,
        "hl": "en",
    }
    print(get_citation("https://scholar.google.com/scholar_lookup", params=params))
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SegformerConfig,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def rename_keys(state_dict, encoder_only=False):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if encoder_only and not key.startswith("head"):
            key = "segformer.encoder." + key
        if key.startswith("backbone"):
            key = key.replace("backbone", "segformer.encoder")
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find("patch_embed") + len("patch_embed")]
            key = key.replace(f"patch_embed{idx}", f"patch_embeddings.{int(idx) - 1}")
        if "norm" in key:
            key = key.replace("norm", "layer_norm")
        if "segformer.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find("segformer.encoder.layer_norm") + len("segformer.encoder.layer_norm")]
            key = key.replace(f"layer_norm{idx}", f"layer_norm.{int(idx) - 1}")
        if "layer_norm1" in key:
            key = key.replace("layer_norm1", "layer_norm_1")
        if "layer_norm2" in key:
            key = key.replace("layer_norm2", "layer_norm_2")
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find("block") + len("block")]
            key = key.replace(f"block{idx}", f"block.{int(idx) - 1}")
        if "attn.q" in key:
            key = key.replace("attn.q", "attention.self.query")
        if "attn.proj" in key:
            key = key.replace("attn.proj", "attention.output.dense")
        if "attn" in key:
            key = key.replace("attn", "attention.self")
        if "fc1" in key:
            key = key.replace("fc1", "dense1")
        if "fc2" in key:
            key = key.replace("fc2", "dense2")
        if "linear_pred" in key:
            key = key.replace("linear_pred", "classifier")
        if "linear_fuse" in key:
            key = key.replace("linear_fuse.conv", "linear_fuse")
            key = key.replace("linear_fuse.bn", "batch_norm")
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find("linear_c") + len("linear_c")]
            key = key.replace(f"linear_c{idx}", f"linear_c.{int(idx) - 1}")
        if key.startswith("head"):
            key = key.replace("head", "classifier")
        new_state_dict[key] = value
    return new_state_dict
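# For example, rename_keys maps "backbone.patch_embed1.proj.weight" to
# "segformer.encoder.patch_embeddings.0.proj.weight": the backbone prefix is rewritten
# first, then "patch_embed1" becomes "patch_embeddings.0".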
def read_in_k_v(state_dict, config):
    """Split each fused key/value matrix of the original checkpoint into separate projections."""
    for i in range(config.num_encoder_blocks):
        for j in range(config.depths[i]):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(f"segformer.encoder.block.{i}.{j}.attention.self.kv.weight")
            kv_bias = state_dict.pop(f"segformer.encoder.block.{i}.{j}.attention.self.kv.bias")
            # next, add keys and values (in that order) to the state dict
            state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.key.weight"] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.key.bias"] = kv_bias[: config.hidden_sizes[i]]
            state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.value.weight"] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.value.bias"] = kv_bias[
                config.hidden_sizes[i] :
            ]
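# Shape sketch for the split above (hidden size H = config.hidden_sizes[i]): the fused
# "kv" kernel has 2*H rows; rows [:H] become the key projection and rows [H:] the value
# projection, and the bias splits the same way.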
def prepare_img():
    """Download a COCO validation image to verify the converted model on."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_segformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path):
    """Copy/paste/tweak the original SegFormer weights into the Transformers design."""
A__ = SegformerConfig()
A__ = False
# set attributes based on model_name
A__ = '''huggingface/label-files'''
if "segformer" in model_name:
A__ = model_name[len('''segformer.''' ) : len('''segformer.''' ) + 2]
if "ade" in model_name:
A__ = 150
A__ = '''ade20k-id2label.json'''
A__ = (1, 150, 128, 128)
elif "city" in model_name:
A__ = 19
A__ = '''cityscapes-id2label.json'''
A__ = (1, 19, 128, 128)
else:
raise ValueError(f"""Model {model_name} not supported""" )
elif "mit" in model_name:
A__ = True
A__ = model_name[4:6]
A__ = 1_000
A__ = '''imagenet-1k-id2label.json'''
A__ = (1, 1_000)
else:
raise ValueError(f"""Model {model_name} not supported""" )
# set config attributes
A__ = json.load(open(hf_hub_download(lowercase_ , lowercase_ , repo_type='''dataset''' ) , '''r''' ) )
A__ = {int(lowercase_ ): v for k, v in idalabel.items()}
A__ = idalabel
A__ = {v: k for k, v in idalabel.items()}
if size == "b0":
pass
elif size == "b1":
A__ = [64, 128, 320, 512]
A__ = 256
elif size == "b2":
A__ = [64, 128, 320, 512]
A__ = 768
A__ = [3, 4, 6, 3]
elif size == "b3":
A__ = [64, 128, 320, 512]
A__ = 768
A__ = [3, 4, 18, 3]
elif size == "b4":
A__ = [64, 128, 320, 512]
A__ = 768
A__ = [3, 8, 27, 3]
elif size == "b5":
A__ = [64, 128, 320, 512]
A__ = 768
A__ = [3, 6, 40, 3]
else:
raise ValueError(f"""Size {size} not supported""" )
# load image processor (only resize + normalize)
A__ = SegformerImageProcessor(
image_scale=(512, 512) , keep_ratio=lowercase_ , align=lowercase_ , do_random_crop=lowercase_ )
# prepare image
A__ = prepare_img()
A__ = image_processor(images=lowercase_ , return_tensors='''pt''' ).pixel_values
logger.info(f"""Converting model {model_name}...""" )
# load original state dict
if encoder_only:
A__ = torch.load(lowercase_ , map_location=torch.device('''cpu''' ) )
else:
A__ = torch.load(lowercase_ , map_location=torch.device('''cpu''' ) )['''state_dict''']
# rename keys
A__ = rename_keys(lowercase_ , encoder_only=lowercase_ )
if not encoder_only:
del state_dict["decode_head.conv_seg.weight"]
del state_dict["decode_head.conv_seg.bias"]
# key and value matrices need special treatment
read_in_k_v(lowercase_ , lowercase_ )
# create HuggingFace model and load state dict
if encoder_only:
A__ = False
A__ = SegformerForImageClassification(lowercase_ )
else:
A__ = SegformerForSemanticSegmentation(lowercase_ )
model.load_state_dict(lowercase_ )
model.eval()
# forward pass
A__ = model(lowercase_ )
A__ = outputs.logits
# set expected_slice based on model name
# ADE20k checkpoints
if model_name == "segformer.b0.512x512.ade.160k":
A__ = torch.tensor(
[
[[-4.63_10, -5.52_32, -6.23_56], [-5.19_21, -6.14_44, -6.59_96], [-5.44_24, -6.27_90, -6.75_74]],
[[-12.13_91, -13.31_22, -13.95_54], [-12.87_32, -13.93_52, -14.35_63], [-12.94_38, -13.82_26, -14.25_13]],
[[-12.51_34, -13.46_86, -14.49_15], [-12.86_69, -14.43_43, -14.77_58], [-13.25_23, -14.58_19, -15.06_94]],
] )
elif model_name == "segformer.b1.512x512.ade.160k":
A__ = torch.tensor(
[
[[-7.58_20, -8.72_31, -8.32_15], [-8.06_00, -10.35_29, -10.03_04], [-7.52_08, -9.41_03, -9.62_39]],
[[-12.69_18, -13.89_94, -13.71_37], [-13.31_96, -15.75_23, -15.47_89], [-12.93_43, -14.87_57, -14.96_89]],
[[-11.19_11, -11.94_21, -11.32_43], [-11.33_42, -13.68_39, -13.35_81], [-10.39_09, -12.18_32, -12.48_58]],
] )
elif model_name == "segformer.b2.512x512.ade.160k":
A__ = torch.tensor(
[
[[-11.81_73, -14.38_50, -16.31_28], [-14.56_48, -16.58_04, -18.65_68], [-14.72_23, -15.73_87, -18.42_18]],
[[-15.72_90, -17.91_71, -19.44_23], [-18.31_05, -19.94_48, -21.46_61], [-17.92_96, -18.64_97, -20.79_10]],
[[-15.07_83, -17.03_36, -18.27_89], [-16.87_71, -18.68_70, -20.16_12], [-16.24_54, -17.14_26, -19.50_55]],
] )
elif model_name == "segformer.b3.512x512.ade.160k":
A__ = torch.tensor(
[
[[-9.08_78, -10.20_81, -10.18_91], [-9.31_44, -10.79_41, -10.98_43], [-9.22_94, -10.38_55, -10.57_04]],
[[-12.23_16, -13.90_68, -13.61_02], [-12.91_61, -14.37_02, -14.32_35], [-12.52_33, -13.71_74, -13.79_32]],
[[-14.62_75, -15.24_90, -14.97_27], [-14.34_00, -15.96_87, -16.28_27], [-14.14_84, -15.40_33, -15.89_37]],
] )
elif model_name == "segformer.b4.512x512.ade.160k":
A__ = torch.tensor(
[
[[-12.31_44, -13.24_47, -14.08_02], [-13.36_14, -14.58_16, -15.61_17], [-13.33_40, -14.44_33, -16.22_19]],
[[-19.27_81, -20.41_28, -20.75_06], [-20.61_53, -21.65_66, -22.09_98], [-19.98_00, -21.04_30, -22.14_94]],
[[-18.87_39, -19.78_04, -21.18_34], [-20.12_33, -21.67_65, -23.29_44], [-20.03_15, -21.26_41, -23.69_44]],
] )
elif model_name == "segformer.b5.640x640.ade.160k":
A__ = torch.tensor(
[
[[-9.55_24, -12.08_35, -11.73_48], [-10.52_29, -13.64_46, -14.56_62], [-9.58_42, -12.88_51, -13.94_14]],
[[-15.34_32, -17.53_23, -17.08_18], [-16.33_30, -18.92_55, -19.21_01], [-15.13_40, -17.78_48, -18.39_71]],
[[-12.60_72, -14.94_86, -14.66_31], [-13.76_29, -17.09_07, -17.77_45], [-12.78_99, -16.16_95, -17.16_71]],
] )
# Cityscapes checkpoints
elif model_name == "segformer.b0.1024x1024.city.160k":
A__ = torch.tensor(
[
[[-11.92_95, -13.40_57, -14.81_06], [-13.34_31, -14.81_79, -15.37_81], [-14.28_36, -15.59_42, -16.15_88]],
[[-11.49_06, -12.80_67, -13.65_64], [-13.11_89, -14.05_00, -14.15_43], [-13.87_48, -14.51_36, -14.87_89]],
[[0.53_74, 0.10_67, -0.47_42], [0.11_41, -0.22_55, -0.70_99], [-0.30_00, -0.59_24, -1.31_05]],
] )
elif model_name == "segformer.b0.512x1024.city.160k":
A__ = torch.tensor(
[
[[-7.82_17, -9.87_67, -10.17_17], [-9.44_38, -10.90_58, -11.40_47], [-9.79_39, -12.34_95, -12.10_79]],
[[-7.15_14, -9.53_36, -10.08_60], [-9.77_76, -11.68_22, -11.84_39], [-10.14_11, -12.76_55, -12.89_72]],
[[0.30_21, 0.08_05, -0.23_10], [-0.03_28, -0.16_05, -0.27_14], [-0.14_08, -0.54_77, -0.69_76]],
] )
elif model_name == "segformer.b0.640x1280.city.160k":
A__ = torch.tensor(
[
[
[-1.1_3_7_2E0_1, -1.2_7_8_7E0_1, -1.3_4_7_7E0_1],
[-1.2_5_3_6E0_1, -1.4_1_9_4E0_1, -1.4_4_0_9E0_1],
[-1.3_2_1_7E0_1, -1.4_8_8_8E0_1, -1.5_3_2_7E0_1],
],
[
[-1.4_7_9_1E0_1, -1.7_1_2_2E0_1, -1.8_2_7_7E0_1],
[-1.7_1_6_3E0_1, -1.9_1_9_2E0_1, -1.9_5_3_3E0_1],
[-1.7_8_9_7E0_1, -1.9_9_9_1E0_1, -2.0_3_1_5E0_1],
],
[
[7.6_7_2_3E-0_1, 4.1_9_2_1E-0_1, -7.7_8_7_8E-0_2],
[4.7_7_7_2E-0_1, 9.5_5_5_7E-0_3, -2.8_0_8_2E-0_1],
[3.6_0_3_2E-0_1, -2.4_8_2_6E-0_1, -5.1_1_6_8E-0_1],
],
] )
elif model_name == "segformer.b0.768x768.city.160k":
A__ = torch.tensor(
[
[[-9.49_59, -11.30_87, -11.74_79], [-11.00_25, -12.65_40, -12.33_19], [-11.40_64, -13.04_87, -12.99_05]],
[[-9.89_05, -11.30_84, -12.08_54], [-11.17_26, -12.76_98, -12.95_83], [-11.59_85, -13.32_78, -14.17_74]],
[[0.22_13, 0.01_92, -0.24_66], [-0.17_31, -0.42_13, -0.48_74], [-0.31_26, -0.65_41, -1.13_89]],
] )
elif model_name == "segformer.b1.1024x1024.city.160k":
A__ = torch.tensor(
[
[[-13.57_48, -13.91_11, -12.65_00], [-14.35_00, -15.36_83, -14.23_28], [-14.75_32, -16.04_24, -15.60_87]],
[[-17.16_51, -15.87_25, -12.96_53], [-17.25_80, -17.37_18, -14.82_23], [-16.60_58, -16.87_83, -16.74_52]],
[[-3.64_56, -3.02_09, -1.42_03], [-3.07_97, -3.19_59, -2.00_00], [-1.87_57, -1.92_17, -1.69_97]],
] )
elif model_name == "segformer.b2.1024x1024.city.160k":
A__ = torch.tensor(
[
[[-16.09_76, -16.48_56, -17.39_62], [-16.62_34, -19.03_42, -19.76_85], [-16.09_00, -18.06_61, -19.11_80]],
[[-18.47_50, -18.84_88, -19.50_74], [-19.40_30, -22.15_70, -22.59_77], [-19.11_91, -20.84_86, -22.37_83]],
[[-4.51_78, -5.50_37, -6.51_09], [-5.08_84, -7.21_74, -8.03_34], [-4.41_56, -5.81_17, -7.29_70]],
] )
elif model_name == "segformer.b3.1024x1024.city.160k":
A__ = torch.tensor(
[
[[-14.20_81, -14.47_32, -14.19_77], [-14.58_67, -16.44_23, -16.63_56], [-13.44_41, -14.96_85, -16.86_96]],
[[-14.45_76, -14.70_73, -15.04_51], [-15.08_16, -17.62_37, -17.98_73], [-14.42_13, -16.01_99, -18.59_92]],
[[-4.73_49, -4.95_88, -5.09_66], [-4.32_10, -6.93_25, -7.25_91], [-3.43_12, -4.74_84, -7.19_17]],
] )
elif model_name == "segformer.b4.1024x1024.city.160k":
A__ = torch.tensor(
[
[[-11.77_37, -11.95_26, -11.32_73], [-13.66_92, -14.45_74, -13.88_78], [-13.89_37, -14.69_24, -15.93_45]],
[[-14.67_06, -14.53_30, -14.13_06], [-16.15_02, -16.81_80, -16.42_69], [-16.83_38, -17.89_39, -20.17_46]],
[[1.04_91, 0.82_89, 1.03_10], [1.10_44, 0.52_19, 0.80_55], [1.08_99, 0.69_26, 0.55_90]],
] )
elif model_name == "segformer.b5.1024x1024.city.160k":
A__ = torch.tensor(
[
[[-12.56_41, -13.47_77, -13.06_84], [-13.95_87, -15.89_83, -16.65_57], [-13.31_09, -15.73_50, -16.31_41]],
[[-14.70_74, -15.43_52, -14.59_44], [-16.63_53, -18.16_63, -18.61_20], [-15.17_02, -18.03_29, -18.15_47]],
[[-1.79_90, -2.09_51, -1.77_84], [-2.63_97, -3.82_45, -3.96_86], [-1.52_64, -2.81_26, -2.93_16]],
] )
else:
A__ = logits.argmax(-1 ).item()
print('''Predicted class:''' , model.config.idalabel[predicted_class_idx] )
# verify logits
if not encoder_only:
assert logits.shape == expected_shape
assert torch.allclose(logits[0, :3, :3, :3] , lowercase_ , atol=1E-2 )
# finally, save model and image processor
logger.info(f"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" )
Path(lowercase_ ).mkdir(exist_ok=lowercase_ )
model.save_pretrained(lowercase_ )
image_processor.save_pretrained(lowercase_ )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--model_name""",
default="""segformer.b0.512x512.ade.160k""",
type=str,
help="""Name of the model you'd like to convert.""",
)
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, help="""Path to the original PyTorch checkpoint (.pth file)."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
    args = parser.parse_args()
convert_segformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
import argparse
import torch
from safetensors.torch import load_file
from diffusers import StableDiffusionPipeline
def convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha):
    # load base model
    pipeline = StableDiffusionPipeline.from_pretrained(base_model_path, torch_dtype=torch.float32)

    # load LoRA weight from .safetensors
    state_dict = load_file(checkpoint_path)

    visited = []

    # directly update weight in diffusers model
    for key in state_dict:
        # it is suggested to print out the key, it usually will be something like below
        # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"

        # as we have set the alpha beforehand, so just skip
        if ".alpha" in key or key in visited:
            continue

        if "text" in key:
            layer_infos = key.split(".")[0].split(lora_prefix_text_encoder + "_")[-1].split("_")
            curr_layer = pipeline.text_encoder
        else:
            layer_infos = key.split(".")[0].split(lora_prefix_unet + "_")[-1].split("_")
            curr_layer = pipeline.unet

        # find the target layer
        temp_name = layer_infos.pop(0)
        while len(layer_infos) > -1:
            try:
                curr_layer = curr_layer.__getattr__(temp_name)
                if len(layer_infos) > 0:
                    temp_name = layer_infos.pop(0)
                elif len(layer_infos) == 0:
                    break
            except Exception:
                if len(temp_name) > 0:
                    temp_name += "_" + layer_infos.pop(0)
                else:
                    temp_name = layer_infos.pop(0)

        pair_keys = []
        if "lora_down" in key:
            pair_keys.append(key.replace("lora_down", "lora_up"))
            pair_keys.append(key)
        else:
            pair_keys.append(key)
            pair_keys.append(key.replace("lora_up", "lora_down"))

        # update weight
        if len(state_dict[pair_keys[0]].shape) == 4:
            weight_up = state_dict[pair_keys[0]].squeeze(3).squeeze(2).to(torch.float32)
            weight_down = state_dict[pair_keys[1]].squeeze(3).squeeze(2).to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down).unsqueeze(2).unsqueeze(3)
        else:
            weight_up = state_dict[pair_keys[0]].to(torch.float32)
            weight_down = state_dict[pair_keys[1]].to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down)

        # update visited list
        for item in pair_keys:
            visited.append(item)

    return pipeline
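# The in-place update above merges the LoRA factors into the base weights:
# W <- W + alpha * (up @ down), where up has shape (out_features, r) and down has shape
# (r, in_features) for a (hypothetical) LoRA rank r, so up @ down matches W's shape.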
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--base_model_path""", default=None, type=str, required=True, help="""Path to the base model in diffusers format."""
)
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert."""
)
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument(
"""--lora_prefix_unet""", default="""lora_unet""", type=str, help="""The prefix of UNet weight in safetensors"""
)
parser.add_argument(
"""--lora_prefix_text_encoder""",
default="""lora_te""",
type=str,
help="""The prefix of text encoder weight in safetensors""",
)
parser.add_argument("""--alpha""", default=0.75, type=float, help="""The merging ratio in W = W0 + alpha * deltaW""")
parser.add_argument(
"""--to_safetensors""", action="""store_true""", help="""Whether to store pipeline in safetensors format or not."""
)
parser.add_argument("""--device""", type=str, help="""Device to use (e.g. cpu, cuda:0, cuda:1, etc.)""")
    args = parser.parse_args()

    base_model_path = args.base_model_path
    checkpoint_path = args.checkpoint_path
    dump_path = args.dump_path
    lora_prefix_unet = args.lora_prefix_unet
    lora_prefix_text_encoder = args.lora_prefix_text_encoder
    alpha = args.alpha

    pipe = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)
    pipe = pipe.to(args.device)
    pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
logger = logging.get_logger(__name__)
@dataclass
class GlueDataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(glue_processors.keys())})
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )

    def __post_init__(self):
        self.task_name = self.task_name.lower()


class Split(Enum):
    train = "train"
    dev = "dev"
    test = "test"


class GlueDataset(Dataset):
    args: GlueDataTrainingArguments
    output_mode: str
    features: List[InputFeatures]

    def __init__(self, args: GlueDataTrainingArguments, tokenizer: PreTrainedTokenizerBase, limit_length: Optional[int] = None, mode: Union[str, Split] = Split.train, cache_dir: Optional[str] = None):
warnings.warn(
'''This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets '''
'''library. You can have a look at this example script for pointers: '''
'''https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py''' , UpperCAmelCase__ , )
A__ = args
A__ = glue_processors[args.task_name]()
A__ = glue_output_modes[args.task_name]
if isinstance(UpperCAmelCase__ , UpperCAmelCase__):
try:
A__ = Split[mode]
except KeyError:
raise KeyError('''mode is not a valid split name''')
# Load data features from cache or dataset file
A__ = os.path.join(
cache_dir if cache_dir is not None else args.data_dir , f"""cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}""" , )
A__ = self.processor.get_labels()
if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
"RobertaTokenizer",
"RobertaTokenizerFast",
"XLMRobertaTokenizer",
"BartTokenizer",
"BartTokenizerFast",
):
# HACK(label indices are swapped in RoBERTa pretrained model)
A__ , A__ = label_list[2], label_list[1]
A__ = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
A__ = cached_features_file + '''.lock'''
with FileLock(UpperCAmelCase__):
if os.path.exists(UpperCAmelCase__) and not args.overwrite_cache:
A__ = time.time()
A__ = torch.load(UpperCAmelCase__)
logger.info(
f"""Loading features from cached file {cached_features_file} [took %.3f s]""" , time.time() - start)
else:
logger.info(f"""Creating features from dataset file at {args.data_dir}""")
if mode == Split.dev:
A__ = self.processor.get_dev_examples(args.data_dir)
elif mode == Split.test:
A__ = self.processor.get_test_examples(args.data_dir)
else:
A__ = self.processor.get_train_examples(args.data_dir)
if limit_length is not None:
A__ = examples[:limit_length]
A__ = glue_convert_examples_to_features(
UpperCAmelCase__ , UpperCAmelCase__ , max_length=args.max_seq_length , label_list=UpperCAmelCase__ , output_mode=self.output_mode , )
A__ = time.time()
torch.save(self.features , UpperCAmelCase__)
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
f"""Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]""")
def __len__( self : Optional[Any]) ->Dict:
'''simple docstring'''
return len(self.features)
def __getitem__( self : int , UpperCAmelCase__ : int) ->InputFeatures:
'''simple docstring'''
return self.features[i]
def SCREAMING_SNAKE_CASE ( self : List[Any]) ->Any:
'''simple docstring'''
return self.label_list
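# Usage sketch (hypothetical local data path; the GLUE data must be downloaded separately):
#   args = GlueDataTrainingArguments(task_name="mrpc", data_dir="/path/to/MRPC", max_seq_length=128)
#   dataset = GlueDataset(args, tokenizer=tokenizer)  # features are cached next to the data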
import os
import pytest
from transformers.dynamic_module_utils import get_imports
TOP_LEVEL_IMPORT = """
import os
"""
IMPORT_IN_FUNCTION = """
def foo():
import os
return False
"""
DEEPLY_NESTED_IMPORT = """
def foo():
def bar():
if True:
import os
return False
return bar()
"""
TOP_LEVEL_TRY_IMPORT = """
import os
try:
import bar
except ImportError:
raise ValueError()
"""
TRY_IMPORT_IN_FUNCTION = """
import os
def foo():
try:
import bar
except ImportError:
raise ValueError()
"""
MULTIPLE_EXCEPTS_IMPORT = """
import os
try:
import bar
except (ImportError, AttributeError):
raise ValueError()
"""
EXCEPT_AS_IMPORT = """
import os
try:
import bar
except ImportError as e:
raise ValueError()
"""
GENERIC_EXCEPT_IMPORT = """
import os
try:
import bar
except:
raise ValueError()
"""
MULTILINE_TRY_IMPORT = """
import os
try:
import bar
import baz
except ImportError:
raise ValueError()
"""
MULTILINE_BOTH_IMPORT = """
import os
try:
import bar
import baz
except ImportError:
x = 1
raise ValueError()
"""
CASES = [
TOP_LEVEL_IMPORT,
IMPORT_IN_FUNCTION,
DEEPLY_NESTED_IMPORT,
TOP_LEVEL_TRY_IMPORT,
GENERIC_EXCEPT_IMPORT,
MULTILINE_TRY_IMPORT,
MULTILINE_BOTH_IMPORT,
MULTIPLE_EXCEPTS_IMPORT,
EXCEPT_AS_IMPORT,
TRY_IMPORT_IN_FUNCTION,
]
@pytest.mark.parametrize("case", CASES)
def test_import_parsing(tmp_path, case):
    tmp_file_path = os.path.join(tmp_path, "test_file.py")
    with open(tmp_file_path, "w") as _tmp_file:
        _tmp_file.write(case)

    parsed_imports = get_imports(tmp_file_path)
    assert parsed_imports == ["os"]
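# Every case above parses to ["os"] because get_imports strips try/except blocks before
# scanning, so the guarded `bar`/`baz` imports are treated as optional dependencies and
# skipped, while `import os` is found even when nested inside a function body.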
INSTALL_CONTENT = """
# How to install Transformers
! pip install transformers datasets
# To install from source instead of the latest release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
def nor_gate(input_1: int, input_2: int) -> int:
    """
    Calculate NOR of two inputs.

    >>> nor_gate(0, 0)
    1
    >>> nor_gate(0, 1)
    0
    >>> nor_gate(1, 1)
    0
    """
    return int(input_1 == input_2 == 0)


def main() -> None:
    print("Truth Table of NOR Gate:")
    print("| Input 1 | Input 2 | Output |")
    print(f"| 0 | 0 | {nor_gate(0, 0)} |")
    print(f"| 0 | 1 | {nor_gate(0, 1)} |")
    print(f"| 1 | 0 | {nor_gate(1, 0)} |")
    print(f"| 1 | 1 | {nor_gate(1, 1)} |")
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
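# NOR is functionally complete; e.g. NOT can be built from it as not_a = nor_gate(a, a),
# since nor_gate(0, 0) == 1 and nor_gate(1, 1) == 0.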
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def convert_tf_gptsan_to_pt(args):
    parameter_file = os.path.join(args.tf_model_dir, "parameters.json")
    params = json.loads(open(parameter_file).read())
    if not params:
        raise ValueError(
            f"It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file."
        )
    if not args.output.endswith(".pt"):
        args.output = args.output + ".pt"
    new_state = OrderedDict()
    with tf.device("/CPU:0"):
        reader = tf.train.load_checkpoint(args.tf_model_dir)
        shapes = reader.get_variable_to_shape_map()
        for key_name in shapes.keys():
            vnp = reader.get_tensor(key_name).astype(np.float16)
if key_name.endswith('''/adam_m''' ) or key_name.endswith('''/adam_v''' ):
continue
if key_name.startswith('''pasts/''' ):
if key_name.startswith('''pasts/mlp''' ):
A__ = int(key_name[9] )
elif key_name.startswith('''pasts/out''' ):
A__ = 8
A__ = '''model.sqout.%d.weight''' % (player * 2) # enter to nn.Sequencial with Tanh, so 2 at a time
A__ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
A__ = torch.tensor(lowercase_ )
elif key_name.startswith('''model/moe''' ):
A__ = int(key_name[9:].split('''/''' )[0] )
if key_name.endswith('''/switch_gating/kernel''' ):
A__ = '''model.blocks.%d.feed_forward.mlp.router.classifier.weight''' % player
A__ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
A__ = torch.tensor(lowercase_ )
elif key_name.endswith('''/softmlp/kernel''' ):
A__ = '''model.blocks.%d.feed_forward.soft_bypass_mlp.weight''' % player
A__ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
A__ = torch.tensor(lowercase_ )
elif key_name.endswith('''/wo/kernel''' ) or key_name.endswith('''/wi/kernel''' ):
A__ = key_name[-9:-7]
for i in range(16 ):
A__ = '''model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight''' % (player, i, nlayer)
A__ = (
vnp[i].transpose([1, 0] ).copy()
) # In Mesh-Tensorflow, it is one array, so it is divided
A__ = torch.tensor(lowercase_ )
elif key_name.startswith('''model/mlp''' ):
A__ = int(key_name[9:].split('''/''' )[0] )
if key_name.endswith('''/p1/kernel''' ):
A__ = '''model.blocks.%d.feed_forward.mlp.wi.weight''' % player
A__ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
A__ = torch.tensor(lowercase_ )
elif key_name.endswith('''/p1/bias''' ):
A__ = '''model.blocks.%d.feed_forward.mlp.wi.bias''' % player
A__ = vnp.copy() # same because it is one dimensional
A__ = torch.tensor(lowercase_ )
elif key_name.endswith('''/p2/kernel''' ):
A__ = '''model.blocks.%d.feed_forward.mlp.wo.weight''' % player
A__ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
A__ = torch.tensor(lowercase_ )
elif key_name.endswith('''/p2/bias''' ):
A__ = '''model.blocks.%d.feed_forward.mlp.wo.bias''' % player
A__ = vnp.copy() # same because it is one dimensional
A__ = torch.tensor(lowercase_ )
elif key_name.startswith('''model/ln''' ):
A__ = int(key_name[8:].split('''/''' )[0] )
if key_name.endswith('''/b''' ):
A__ = '''model.blocks.%d.feed_forward.norm.bias''' % player
A__ = vnp.copy() # same because it is one dimensional
A__ = torch.tensor(lowercase_ )
elif key_name.endswith('''/g''' ):
A__ = '''model.blocks.%d.feed_forward.norm.weight''' % player
A__ = vnp.copy() # same because it is one dimensional
A__ = torch.tensor(lowercase_ )
elif key_name.startswith('''model/att''' ):
A__ = int(key_name[9:].split('''/''' )[0] )
if key_name.endswith('''/qkv/kernel''' ):
A__ = vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum
A__ = state[:, 0, :, :]
A__ = state[:, 1, :, :]
A__ = state[:, 2, :, :]
A__ = (
state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
A__ = (
state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
A__ = (
state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
A__ = '''model.blocks.%d.self_attn.self_attn.q_proj.weight''' % player
A__ = torch.tensor(lowercase_ )
A__ = '''model.blocks.%d.self_attn.self_attn.k_proj.weight''' % player
A__ = torch.tensor(lowercase_ )
A__ = '''model.blocks.%d.self_attn.self_attn.v_proj.weight''' % player
A__ = torch.tensor(lowercase_ )
elif key_name.endswith('''/o/kernel''' ):
A__ = '''model.blocks.%d.self_attn.self_attn.out_proj.weight''' % player
A__ = (
vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy()
) # Mesh-Tensorflow is a diagonal matrix
A__ = torch.tensor(lowercase_ )
elif key_name.startswith('''model/an''' ):
A__ = int(key_name[8:].split('''/''' )[0] )
if key_name.endswith('''/b''' ):
A__ = '''model.blocks.%d.self_attn.norm.bias''' % player
A__ = vnp.copy() # same because it is one dimensional
A__ = torch.tensor(lowercase_ )
elif key_name.endswith('''/g''' ):
A__ = '''model.blocks.%d.self_attn.norm.weight''' % player
A__ = vnp.copy() # same because it is one dimensional
A__ = torch.tensor(lowercase_ )
elif (
key_name.startswith('''model/wte''' )
or key_name.startswith('''model/wpe''' )
or key_name.startswith('''model/ete''' )
):
A__ = {'''wte''': '''embed_tokens''', '''wpe''': '''position_embeddings''', '''ete''': '''extra_position_embeddings'''}[
key_name[-3:]
]
A__ = '''model.%s.weight''' % nlayer
A__ = vnp.copy() # same in embedded
A__ = torch.tensor(lowercase_ )
if key_name.startswith('''model/wte''' ):
A__ = '''lm_head.weight'''
A__ = vnp.copy() # same in embedded
A__ = torch.tensor(lowercase_ )
elif key_name.startswith('''model/wob''' ):
A__ = '''final_logits_bias'''
A__ = vnp.copy() # same in embedded
A__ = state.reshape((1, -1) )
A__ = torch.tensor(lowercase_ )
elif key_name == "model/dense/kernel":
A__ = '''model.last_project.weight'''
A__ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
A__ = torch.tensor(lowercase_ )
elif key_name == "model/dense_1/bias":
A__ = '''model.last_project.bias'''
A__ = vnp.copy() # same because it is one dimensional
A__ = torch.tensor(lowercase_ )
    torch.save(new_state, args.output)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
description="""model converter.""", formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument("""--tf_model_dir""", metavar="""PATH""", type=str, required=True, help="""import model""")
parser.add_argument("""--output""", metavar="""PATH""", type=str, required=True, help="""output model""")
    args = parser.parse_args()
convert_tf_gptsan_to_pt(args)
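# Example invocation (hypothetical paths, for illustration only):
#   python convert_gptsan_tf_checkpoint_to_pytorch.py --tf_model_dir ./gptsan_tf_ckpt --output ./gptsan
# Note the ".pt" suffix is appended automatically when it is missing from --output.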
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
BERT_TEST_FILE = os.path.join("tests", "models", "bert", "test_modeling_bert.py")
BLIP_TEST_FILE = os.path.join("tests", "models", "blip", "test_modeling_blip.py")
class GetTestInfoTester(unittest.TestCase):
    def test_get_test_to_tester_mapping(self):
        bert_test_tester_mapping = get_test_to_tester_mapping(BERT_TEST_FILE)
        blip_test_tester_mapping = get_test_to_tester_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {"BertModelTest": "BertModelTester"}
        EXPECTED_BLIP_MAPPING = {
'''BlipModelTest''': '''BlipModelTester''',
'''BlipTextImageModelTest''': '''BlipTextImageModelsModelTester''',
'''BlipTextModelTest''': '''BlipTextModelTester''',
'''BlipTextRetrievalModelTest''': '''BlipTextRetrievalModelTester''',
'''BlipVQAModelTest''': '''BlipVQAModelTester''',
'''BlipVisionModelTest''': '''BlipVisionModelTester''',
}
        self.assertEqual(get_test_info.to_json(bert_test_tester_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_test_tester_mapping), EXPECTED_BLIP_MAPPING)
    def test_get_model_to_test_mapping(self):
        bert_model_test_mapping = get_model_to_test_mapping(BERT_TEST_FILE)
        blip_model_test_mapping = get_model_to_test_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {
'''BertForMaskedLM''': ['''BertModelTest'''],
'''BertForMultipleChoice''': ['''BertModelTest'''],
'''BertForNextSentencePrediction''': ['''BertModelTest'''],
'''BertForPreTraining''': ['''BertModelTest'''],
'''BertForQuestionAnswering''': ['''BertModelTest'''],
'''BertForSequenceClassification''': ['''BertModelTest'''],
'''BertForTokenClassification''': ['''BertModelTest'''],
'''BertLMHeadModel''': ['''BertModelTest'''],
'''BertModel''': ['''BertModelTest'''],
}
        EXPECTED_BLIP_MAPPING = {
'''BlipForConditionalGeneration''': ['''BlipTextImageModelTest'''],
'''BlipForImageTextRetrieval''': ['''BlipTextRetrievalModelTest'''],
'''BlipForQuestionAnswering''': ['''BlipVQAModelTest'''],
'''BlipModel''': ['''BlipModelTest'''],
'''BlipTextModel''': ['''BlipTextModelTest'''],
'''BlipVisionModel''': ['''BlipVisionModelTest'''],
}
        self.assertEqual(get_test_info.to_json(bert_model_test_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_model_test_mapping), EXPECTED_BLIP_MAPPING)
    def test_get_model_to_tester_mapping(self):
        bert_model_tester_mapping = get_model_to_tester_mapping(BERT_TEST_FILE)
        blip_model_tester_mapping = get_model_to_tester_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {
'''BertForMaskedLM''': ['''BertModelTester'''],
'''BertForMultipleChoice''': ['''BertModelTester'''],
'''BertForNextSentencePrediction''': ['''BertModelTester'''],
'''BertForPreTraining''': ['''BertModelTester'''],
'''BertForQuestionAnswering''': ['''BertModelTester'''],
'''BertForSequenceClassification''': ['''BertModelTester'''],
'''BertForTokenClassification''': ['''BertModelTester'''],
'''BertLMHeadModel''': ['''BertModelTester'''],
'''BertModel''': ['''BertModelTester'''],
}
        EXPECTED_BLIP_MAPPING = {
'''BlipForConditionalGeneration''': ['''BlipTextImageModelsModelTester'''],
'''BlipForImageTextRetrieval''': ['''BlipTextRetrievalModelTester'''],
'''BlipForQuestionAnswering''': ['''BlipVQAModelTester'''],
'''BlipModel''': ['''BlipModelTester'''],
'''BlipTextModel''': ['''BlipTextModelTester'''],
'''BlipVisionModel''': ['''BlipVisionModelTester'''],
}
        self.assertEqual(get_test_info.to_json(bert_model_tester_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_model_tester_mapping), EXPECTED_BLIP_MAPPING)
from transformers import BertTokenizer, EncoderDecoderModel, Seq2SeqTrainer, Seq2SeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class Seq2seqTrainerTester(TestCasePlus):
    @slow
    @require_torch
    def test_finetune_bert2bert(self):
        bert2bert = EncoderDecoderModel.from_encoder_decoder_pretrained("prajjwal1/bert-tiny", "prajjwal1/bert-tiny")
        tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")

        bert2bert.config.vocab_size = bert2bert.config.encoder.vocab_size
        bert2bert.config.eos_token_id = tokenizer.sep_token_id
        bert2bert.config.decoder_start_token_id = tokenizer.cls_token_id
        bert2bert.config.max_length = 128

        train_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="train[:1%]")
        val_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="validation[:1%]")

        train_dataset = train_dataset.select(range(32))
        val_dataset = val_dataset.select(range(16))

        batch_size = 4

        def _map_to_encoder_decoder_inputs(batch):
            # Tokenizer will automatically set [BOS] <text> [EOS]
            inputs = tokenizer(batch["article"], padding="max_length", truncation=True, max_length=512)
            outputs = tokenizer(batch["highlights"], padding="max_length", truncation=True, max_length=128)
            batch["input_ids"] = inputs.input_ids
            batch["attention_mask"] = inputs.attention_mask

            batch["decoder_input_ids"] = outputs.input_ids
            batch["labels"] = outputs.input_ids.copy()
            batch["labels"] = [
                [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["labels"]
            ]
            batch["decoder_attention_mask"] = outputs.attention_mask

            assert all(len(x) == 512 for x in inputs.input_ids)
            assert all(len(x) == 128 for x in outputs.input_ids)

            return batch

        def _compute_metrics(pred):
            labels_ids = pred.label_ids
            pred_ids = pred.predictions

            # all unnecessary tokens are removed
            pred_str = tokenizer.batch_decode(pred_ids, skip_special_tokens=True)
            label_str = tokenizer.batch_decode(labels_ids, skip_special_tokens=True)
            accuracy = sum([int(pred_str[i] == label_str[i]) for i in range(len(pred_str))]) / len(pred_str)

            return {"accuracy": accuracy}

        # map train dataset
        train_dataset = train_dataset.map(
            _map_to_encoder_decoder_inputs, batched=True, batch_size=batch_size, remove_columns=["article", "highlights"], )
        train_dataset.set_format(
            type="torch", columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"], )

        # same for validation dataset
        val_dataset = val_dataset.map(
            _map_to_encoder_decoder_inputs, batched=True, batch_size=batch_size, remove_columns=["article", "highlights"], )
        val_dataset.set_format(
            type="torch", columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"], )

        output_dir = self.get_auto_remove_tmp_dir()

        training_args = Seq2SeqTrainingArguments(
            output_dir=output_dir, per_device_train_batch_size=batch_size, per_device_eval_batch_size=batch_size, predict_with_generate=True, evaluation_strategy="steps", do_train=True, do_eval=True, warmup_steps=0, eval_steps=2, logging_steps=2, )

        # instantiate trainer
        trainer = Seq2SeqTrainer(
            model=bert2bert, args=training_args, compute_metrics=_compute_metrics, train_dataset=train_dataset, eval_dataset=val_dataset, tokenizer=tokenizer, )

        # start training
        trainer.train()
| 14 |
import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
)
from transformers.models.efficientformer.modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_vision_available():
from PIL import Image
from transformers import EfficientFormerImageProcessor
class TFEfficientFormerModelTester :
'''simple docstring'''
    def __init__( self , parent , batch_size: int = 13 , image_size: int = 64 , patch_size: int = 2 , embed_dim: int = 3 , num_channels: int = 3 , is_training: bool = True , use_labels: bool = True , hidden_size: int = 128 , hidden_sizes=[16, 32, 64, 128] , num_hidden_layers: int = 7 , num_attention_heads: int = 4 , intermediate_size: int = 37 , hidden_act: str = "gelu" , hidden_dropout_prob: float = 0.1 , attention_probs_dropout_prob: float = 0.1 , type_sequence_label_size: int = 10 , initializer_range: float = 0.02 , encoder_stride: int = 2 , num_attention_outputs: int = 1 , dim: int = 128 , depths: List[int] = [2, 2, 2, 2] , resolution: int = 2 , mlp_expansion_ratio: int = 2 , ) ->None:
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        self.num_attention_outputs = num_attention_outputs
        self.embed_dim = embed_dim
        self.seq_length = embed_dim + 1
        self.resolution = resolution
        self.depths = depths
        self.hidden_sizes = hidden_sizes
        self.dim = dim
        self.mlp_expansion_ratio = mlp_expansion_ratio
    def prepare_config_and_inputs( self : List[Any]) ->str:
'''simple docstring'''
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self : int) ->EfficientFormerConfig:
        '''simple docstring'''
        return EfficientFormerConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , resolution=self.resolution , depths=self.depths , hidden_sizes=self.hidden_sizes , dim=self.dim , mlp_expansion_ratio=self.mlp_expansion_ratio , )
    def create_and_check_model( self , config : Dict , pixel_values : List[str] , labels : Dict) ->Dict:
        '''simple docstring'''
        model = TFEfficientFormerModel(config=config)
        result = model(pixel_values , training=False)
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_image_classification( self , config : Dict , pixel_values : Dict , labels : str) ->Union[str, Any]:
        '''simple docstring'''
        config.num_labels = self.type_sequence_label_size
        model = TFEfficientFormerForImageClassification(config)
        result = model(pixel_values , labels=labels , training=False)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
        # test greyscale images
        config.num_channels = 1
        model = TFEfficientFormerForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values , labels=labels)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
    def prepare_config_and_inputs_for_common( self : int) ->List[str]:
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_tf
class UpperCamelCase_ ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (
(
TFEfficientFormerModel,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerForImageClassification,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
'''feature-extraction''': TFEfficientFormerModel,
'''image-classification''': (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
),
}
if is_tf_available()
else {}
)
UpperCAmelCase__ = False
UpperCAmelCase__ = False
UpperCAmelCase__ = False
UpperCAmelCase__ = False
UpperCAmelCase__ = False
    def setUp( self : Optional[Any]) ->None:
        '''simple docstring'''
        self.model_tester = TFEfficientFormerModelTester(self)
        self.config_tester = ConfigTester(
            self , config_class=EfficientFormerConfig , has_text_modality=False , hidden_size=37)
    def test_config( self : int) ->Any:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='''EfficientFormer does not use inputs_embeds''')
    def test_inputs_embeds( self : List[str]) ->Dict:
'''simple docstring'''
pass
@unittest.skip(reason='''EfficientFormer does not support input and output embeddings''')
    def test_model_common_attributes( self : List[Any]) ->Optional[Any]:
'''simple docstring'''
pass
    def test_forward_signature( self : Any) ->Optional[Any]:
        '''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , expected_arg_names)
    def test_hidden_states_output( self : str) ->Any:
        '''simple docstring'''
        def check_hidden_states_output(inputs_dict , config , model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict , model_class) , training=False)
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_layers = getattr(
                self.model_tester , '''expected_num_hidden_layers''' , self.model_tester.num_hidden_layers + 1)
            self.assertEqual(len(hidden_states) , expected_num_layers)
            if hasattr(self.model_tester , '''encoder_seq_length'''):
                seq_length = self.model_tester.encoder_seq_length
                if hasattr(self.model_tester , '''chunk_length''') and self.model_tester.chunk_length > 1:
                    seq_length = seq_length * self.model_tester.chunk_length
            else:
                seq_length = self.model_tester.seq_length
            self.assertListEqual(
                list(hidden_states[-1].shape[-2:]) , [seq_length, self.model_tester.hidden_size] , )
            if config.is_encoder_decoder:
                hidden_states = outputs.decoder_hidden_states
                self.assertIsInstance(hidden_states , (list, tuple))
                self.assertEqual(len(hidden_states) , expected_num_layers)
                seq_length = getattr(self.model_tester , '''seq_length''' , None)
                decoder_seq_length = getattr(self.model_tester , '''decoder_seq_length''' , seq_length)
                self.assertListEqual(
                    list(hidden_states[-1].shape[-2:]) , [decoder_seq_length, self.model_tester.hidden_size] , )
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict['''output_hidden_states'''] = True
            check_hidden_states_output(inputs_dict , config , model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict , config , model_class)
    def _prepare_for_class( self , inputs_dict : Dict , model_class : Dict , return_labels : bool=False) ->int:
        '''simple docstring'''
        inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels)
        if return_labels:
            if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
                del inputs_dict["labels"]
        return inputs_dict
    def test_model( self : Optional[Any]) ->Union[str, Any]:
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
@unittest.skip(reason='''EfficientFormer does not implement masked image modeling yet''')
    def test_for_masked_image_modeling( self : str) ->str:
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)
    def test_for_image_classification( self : Any) ->Tuple:
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
@slow
    def test_model_from_pretrained( self : Tuple) ->Optional[int]:
        '''simple docstring'''
        for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFEfficientFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_attention_outputs( self : Any) ->str:
        '''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        seq_len = getattr(self.model_tester , '''seq_length''' , None)
        encoder_seq_length = getattr(self.model_tester , '''encoder_seq_length''' , seq_len)
        encoder_key_length = getattr(self.model_tester , '''key_length''' , encoder_seq_length)
        chunk_length = getattr(self.model_tester , '''chunk_length''' , None)
        if chunk_length is not None and hasattr(self.model_tester , '''num_hashes'''):
            encoder_seq_length = encoder_seq_length * self.model_tester.num_hashes
        for model_class in self.all_model_classes:
            inputs_dict['''output_attentions'''] = True
            inputs_dict['''use_cache'''] = False
            config.return_dict = True
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict , model_class) , training=False)
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions) , self.model_tester.num_attention_outputs)
            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict , model_class) , training=False)
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions) , self.model_tester.num_attention_outputs)
            if chunk_length is not None:
                self.assertListEqual(
                    list(attentions[0].shape[-4:]) , [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , )
            else:
                self.assertListEqual(
                    list(attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , )
    def test_compile_tf_model( self : List[str]) ->Optional[Any]:
        '''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # Prepare our model
            model = model_class(config)
            # These are maximally general inputs for the model, with multiple None dimensions
            # Hopefully this will catch any conditionals that fail for flexible shapes
            functional_inputs = {
                key: tf.keras.Input(shape=val.shape[1:] , dtype=val.dtype , name=key)
                for key, val in model.input_signature.items()
                if key in model.dummy_inputs
            }
            outputs_dict = model(functional_inputs)
            self.assertTrue(outputs_dict is not None)
def prepare_img( ) -> Any:
    """simple docstring"""
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_tf
@require_vision
class UpperCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
    def default_image_processor( self : List[str]) ->List[str]:
'''simple docstring'''
return (
EfficientFormerImageProcessor.from_pretrained('''snap-research/efficientformer-l1-300''')
if is_vision_available()
else None
)
@slow
    def test_inference_image_classification_head( self : List[str]) ->Any:
        '''simple docstring'''
        model = TFEfficientFormerForImageClassification.from_pretrained('''snap-research/efficientformer-l1-300''')
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='''tf''')
        # forward pass
        outputs = model(**inputs , training=False)
        # verify the logits
        expected_shape = tf.TensorShape((1, 1_000))
        self.assertEqual(outputs.logits.shape , expected_shape)
        expected_slice = tf.constant([-0.0555, 0.4825, -0.0852])
        self.assertTrue(np.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4))
@slow
    def test_inference_image_classification_head_with_teacher( self : Dict) ->int:
        '''simple docstring'''
        model = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
            '''snap-research/efficientformer-l1-300''')
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='''tf''')
        # forward pass
        outputs = model(**inputs , training=False)
        # verify the logits
        expected_shape = tf.TensorShape((1, 1_000))
        self.assertEqual(outputs.logits.shape , expected_shape)
        expected_slice = tf.constant([-0.1312, 0.4353, -1.0499])
        self.assertTrue(np.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4))
| 14 | 1 |
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
rename_keys_prefix = [
("""bert.bert""", """visual_bert"""),
("""bert.cls""", """cls"""),
("""bert.classifier""", """cls"""),
("""token_type_embeddings_visual""", """visual_token_type_embeddings"""),
("""position_embeddings_visual""", """visual_position_embeddings"""),
("""projection""", """visual_projection"""),
]
ACCEPTABLE_CHECKPOINTS = [
"""nlvr2_coco_pre_trained.th""",
"""nlvr2_fine_tuned.th""",
"""nlvr2_pre_trained.th""",
"""vcr_coco_pre_train.th""",
"""vcr_fine_tune.th""",
"""vcr_pre_train.th""",
"""vqa_coco_pre_trained.th""",
"""vqa_fine_tuned.th""",
"""vqa_pre_trained.th""",
]
def load_state_dict(checkpoint_path ) -> Tuple:
    """simple docstring"""
    sd = torch.load(checkpoint_path , map_location='''cpu''' )
    return sd
def get_new_dict(d , config , rename_keys_prefix=rename_keys_prefix ) -> Optional[int]:
    """simple docstring"""
    new_d = OrderedDict()
    new_d['''visual_bert.embeddings.position_ids'''] = torch.arange(config.max_position_embeddings ).expand((1, -1) )
    # detector_d = OrderedDict()
    for key in d:
        if "detector" in key:
            # detector_d[key.replace('detector.','')] = d[key]
            continue
        new_key = key
        for name_pair in rename_keys_prefix:
            new_key = new_key.replace(name_pair[0] , name_pair[1] )
        new_d[new_key] = d[key]
        if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`, but was added separately
            new_d['''cls.predictions.decoder.bias'''] = new_d['''cls.predictions.bias''']
    return new_d
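# Illustration of the renaming above (hypothetical checkpoint key, not from the
# original script): with the ("bert.bert", "visual_bert") pair in rename_keys_prefix,
# "bert.bert.encoder.layer.0.attention.self.query.weight" becomes
# "visual_bert.encoder.layer.0.attention.self.query.weight" in the new state dict.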
@torch.no_grad()
def convert_visual_bert_checkpoint(checkpoint_path , pytorch_dump_folder_path ) -> List[str]:
"""simple docstring"""
assert (
checkpoint_path.split('''/''' )[-1] in ACCEPTABLE_CHECKPOINTS
), f"""The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}."""
# Get Config
if "pre" in checkpoint_path:
        model_type = '''pretraining'''
        if "vcr" in checkpoint_path:
            config_params = {'''visual_embedding_dim''': 512}
        elif "vqa_advanced" in checkpoint_path:
            config_params = {'''visual_embedding_dim''': 2_048}
        elif "vqa" in checkpoint_path:
            config_params = {'''visual_embedding_dim''': 2_048}
        elif "nlvr" in checkpoint_path:
            config_params = {'''visual_embedding_dim''': 1_024}
        else:
            raise NotImplementedError(f"""No implementation found for `{checkpoint_path}`.""" )
    else:
        if "vcr" in checkpoint_path:
            config_params = {'''visual_embedding_dim''': 512}
            model_type = '''multichoice'''
        elif "vqa_advanced" in checkpoint_path:
            config_params = {'''visual_embedding_dim''': 2_048}
            model_type = '''vqa_advanced'''
        elif "vqa" in checkpoint_path:
            config_params = {'''visual_embedding_dim''': 2_048, '''num_labels''': 3_129}
            model_type = '''vqa'''
        elif "nlvr" in checkpoint_path:
            config_params = {
                '''visual_embedding_dim''': 1_024,
                '''num_labels''': 2,
            }
            model_type = '''nlvr'''
    config = VisualBertConfig(**config_params )
# Load State Dict
    state_dict = load_state_dict(checkpoint_path )
    new_state_dict = get_new_dict(state_dict , config )
    if model_type == "pretraining":
        model = VisualBertForPreTraining(config )
    elif model_type == "vqa":
        model = VisualBertForQuestionAnswering(config )
    elif model_type == "nlvr":
        model = VisualBertForVisualReasoning(config )
    elif model_type == "multichoice":
        model = VisualBertForMultipleChoice(config )
    model.load_state_dict(new_state_dict )
    # Save Checkpoints
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""orig_checkpoint_path""", type=str, help="""A path to .th on local filesystem.""")
parser.add_argument("""pytorch_dump_folder_path""", type=str, help="""Path to the output PyTorch model.""")
    args = parser.parse_args()
    convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
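# Example invocation (hypothetical local paths, not part of the original script):
#   python convert_visual_bert_checkpoint.py ./checkpoints/vqa_fine_tuned.th ./visualbert-vqa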
| 14 |
from __future__ import annotations
def fractional_knapsack(value , weight , capacity ) -> tuple[float, list[float]]:
    """simple docstring"""
    index = list(range(len(value ) ) )
    ratio = [v / w for v, w in zip(value , weight )]
    index.sort(key=lambda i : ratio[i] , reverse=True )
    max_value = 0
    fractions = [0] * len(value )
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break
    return max_value, fractions
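# A minimal usage sketch (hedged: the numbers below are illustrative only):
#   value, weight = [1, 3, 5, 7, 9], [0.9, 0.7, 0.5, 0.3, 0.1]
#   max_value, fractions = fractional_knapsack(value, weight, capacity=25)
#   # every item fits (total weight 2.5 <= 25), so max_value == 25
#   # and fractions == [1, 1, 1, 1, 1]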
if __name__ == "__main__":
import doctest
doctest.testmod()
| 14 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_torch_available,
)
_import_structure = {
"""configuration_speecht5""": [
"""SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP""",
"""SpeechT5Config""",
"""SpeechT5HifiGanConfig""",
],
"""feature_extraction_speecht5""": ["""SpeechT5FeatureExtractor"""],
"""processing_speecht5""": ["""SpeechT5Processor"""],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""tokenization_speecht5"""] = ["""SpeechT5Tokenizer"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_speecht5"""] = [
"""SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SpeechT5ForSpeechToText""",
"""SpeechT5ForSpeechToSpeech""",
"""SpeechT5ForTextToSpeech""",
"""SpeechT5Model""",
"""SpeechT5PreTrainedModel""",
"""SpeechT5HifiGan""",
]
if TYPE_CHECKING:
    from .configuration_speecht5 import (
        SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
        SpeechT5Config,
        SpeechT5HifiGanConfig,
    )
    from .feature_extraction_speecht5 import SpeechT5FeatureExtractor
    from .processing_speecht5 import SpeechT5Processor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_speecht5 import SpeechT5Tokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_speecht5 import (
            SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            SpeechT5ForSpeechToSpeech,
            SpeechT5ForSpeechToText,
            SpeechT5ForTextToSpeech,
            SpeechT5HifiGan,
            SpeechT5Model,
            SpeechT5PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
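# Note on the pattern above: installing the _LazyModule in sys.modules means a
# statement like `from transformers.models.speecht5 import SpeechT5Config` only
# imports the configuration submodule on first attribute access, keeping the
# top-level `import transformers` cheap.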
| 14 |
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def log_results(result , args ) -> Optional[Any]:
    """simple docstring"""
    log_outputs = args.log_outputs
    dataset_id = '''_'''.join(args.dataset.split('''/''' ) + [args.config, args.split] )
    # load metric
    wer = load_metric('''wer''' )
    cer = load_metric('''cer''' )
    # compute metrics
    wer_result = wer.compute(references=result['''target'''] , predictions=result['''prediction'''] )
    cer_result = cer.compute(references=result['''target'''] , predictions=result['''prediction'''] )
    # print & log results
    result_str = f"""WER: {wer_result}\nCER: {cer_result}"""
    print(result_str )
    with open(f"""{dataset_id}_eval_results.txt""" , '''w''' ) as f:
        f.write(result_str )
    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        pred_file = f"""log_{dataset_id}_predictions.txt"""
        target_file = f"""log_{dataset_id}_targets.txt"""
        with open(pred_file , '''w''' ) as p, open(target_file , '''w''' ) as t:
            # mapping function to write output
            def write_to_file(batch , i ):
                p.write(f"""{i}""" + '''\n''' )
                p.write(batch['''prediction'''] + '''\n''' )
                t.write(f"""{i}""" + '''\n''' )
                t.write(batch['''target'''] + '''\n''' )
            result.map(write_to_file , with_indices=True )
def normalize_text(text ) -> str:
    """simple docstring"""
    chars_to_ignore_regex = '''[,?.!\-\;\:"“%‘”�—’…–]'''  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
    text = re.sub(chars_to_ignore_regex , '''''' , text.lower() )
    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    token_sequences_to_ignore = ['''\n\n''', '''\n''', '''  ''', ''' ''']
    for t in token_sequences_to_ignore:
        text = ''' '''.join(text.split(t ) )
    return text
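# A quick illustration (hedged: the input string is illustrative only):
#   normalize_text('Hello, World!  "Test."')  ->  'hello world test'
# (punctuation is stripped by the regex, then repeated whitespace is collapsed)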
def main(args ) -> List[str]:
    """simple docstring"""
    dataset = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=True )
    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))
    # load processor
    feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id )
    sampling_rate = feature_extractor.sampling_rate
    # resample audio
    dataset = dataset.cast_column('''audio''' , Audio(sampling_rate=sampling_rate ) )
    # load eval pipeline
    if args.device is None:
        args.device = 0 if torch.cuda.is_available() else -1
    asr = pipeline('''automatic-speech-recognition''' , model=args.model_id , device=args.device )
    # map function to decode audio
    def map_to_pred(batch ):
        prediction = asr(
            batch['''audio''']['''array'''] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s )
        batch['''prediction'''] = prediction['''text''']
        batch['''target'''] = normalize_text(batch['''sentence'''] )
        return batch
    # run inference on all examples
    result = dataset.map(map_to_pred , remove_columns=dataset.column_names )
    # compute and log_results
    # do not change function below
    log_results(result , args )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--model_id""", type=str, required=True, help="""Model identifier. Should be loadable with 🤗 Transformers"""
)
parser.add_argument(
"""--dataset""",
type=str,
required=True,
help="""Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets""",
)
parser.add_argument(
"""--config""", type=str, required=True, help="""Config of the dataset. *E.g.* `'en'` for Common Voice"""
)
parser.add_argument("""--split""", type=str, required=True, help="""Split of the dataset. *E.g.* `'test'`""")
parser.add_argument(
"""--chunk_length_s""", type=float, default=None, help="""Chunk length in seconds. Defaults to 5 seconds."""
)
parser.add_argument(
"""--stride_length_s""", type=float, default=None, help="""Stride of the audio chunks. Defaults to 1 second."""
)
parser.add_argument(
"""--log_outputs""", action="""store_true""", help="""If defined, write outputs to log file for analysis."""
)
parser.add_argument(
"""--device""",
type=int,
default=None,
help="""The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.""",
)
    args = parser.parse_args()
main(args)
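# Example invocation (hypothetical model/dataset IDs, not part of the original script):
#   python eval.py --model_id my-org/wav2vec2-finetuned --dataset common_voice \
#       --config en --split test --log_outputs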
| 14 | 1 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_mobilevit_config(mobilevit_name ) -> List[Any]:
    """simple docstring"""
    config = MobileViTConfig()
    # size of the architecture
    if "mobilevit_s" in mobilevit_name:
        config.hidden_sizes = [144, 192, 240]
        config.neck_hidden_sizes = [16, 32, 64, 96, 128, 160, 640]
    elif "mobilevit_xs" in mobilevit_name:
        config.hidden_sizes = [96, 120, 144]
        config.neck_hidden_sizes = [16, 32, 48, 64, 80, 96, 384]
    elif "mobilevit_xxs" in mobilevit_name:
        config.hidden_sizes = [64, 80, 96]
        config.neck_hidden_sizes = [16, 16, 24, 48, 64, 80, 320]
    config.hidden_dropout_prob = 0.05
    config.expand_ratio = 2.0
    if mobilevit_name.startswith('''deeplabv3_''' ):
        config.image_size = 512
        config.output_stride = 16
        config.num_labels = 21
        filename = '''pascal-voc-id2label.json'''
    else:
        config.num_labels = 1_000
        filename = '''imagenet-1k-id2label.json'''
    repo_id = '''huggingface/label-files'''
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='''dataset''' ) , '''r''' ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def rename_key(name , base_model=False ) -> Tuple:
    """simple docstring"""
    for i in range(1 , 6 ):
        if f"""layer_{i}.""" in name:
            name = name.replace(f"""layer_{i}.""" , f"""encoder.layer.{i - 1}.""" )
    if "conv_1." in name:
        name = name.replace('''conv_1.''' , '''conv_stem.''' )
    if ".block." in name:
        name = name.replace('''.block.''' , '''.''' )
    if "exp_1x1" in name:
        name = name.replace('''exp_1x1''' , '''expand_1x1''' )
    if "red_1x1" in name:
        name = name.replace('''red_1x1''' , '''reduce_1x1''' )
    if ".local_rep.conv_3x3." in name:
        name = name.replace('''.local_rep.conv_3x3.''' , '''.conv_kxk.''' )
    if ".local_rep.conv_1x1." in name:
        name = name.replace('''.local_rep.conv_1x1.''' , '''.conv_1x1.''' )
    if ".norm." in name:
        name = name.replace('''.norm.''' , '''.normalization.''' )
    if ".conv." in name:
        name = name.replace('''.conv.''' , '''.convolution.''' )
    if ".conv_proj." in name:
        name = name.replace('''.conv_proj.''' , '''.conv_projection.''' )
    for i in range(0 , 2 ):
        for j in range(0 , 4 ):
            if f""".{i}.{j}.""" in name:
                name = name.replace(f""".{i}.{j}.""" , f""".{i}.layer.{j}.""" )
    for i in range(2 , 6 ):
        for j in range(0 , 4 ):
            if f""".{i}.{j}.""" in name:
                name = name.replace(f""".{i}.{j}.""" , f""".{i}.""" )
    if "expand_1x1" in name:
        name = name.replace('''expand_1x1''' , '''downsampling_layer.expand_1x1''' )
    if "conv_3x3" in name:
        name = name.replace('''conv_3x3''' , '''downsampling_layer.conv_3x3''' )
    if "reduce_1x1" in name:
        name = name.replace('''reduce_1x1''' , '''downsampling_layer.reduce_1x1''' )
    for i in range(2 , 5 ):
        if f""".global_rep.{i}.weight""" in name:
            name = name.replace(f""".global_rep.{i}.weight""" , '''.layernorm.weight''' )
        if f""".global_rep.{i}.bias""" in name:
            name = name.replace(f""".global_rep.{i}.bias""" , '''.layernorm.bias''' )
    if ".global_rep." in name:
        name = name.replace('''.global_rep.''' , '''.transformer.''' )
    if ".pre_norm_mha.0." in name:
        name = name.replace('''.pre_norm_mha.0.''' , '''.layernorm_before.''' )
    if ".pre_norm_mha.1.out_proj." in name:
        name = name.replace('''.pre_norm_mha.1.out_proj.''' , '''.attention.output.dense.''' )
    if ".pre_norm_ffn.0." in name:
        name = name.replace('''.pre_norm_ffn.0.''' , '''.layernorm_after.''' )
    if ".pre_norm_ffn.1." in name:
        name = name.replace('''.pre_norm_ffn.1.''' , '''.intermediate.dense.''' )
    if ".pre_norm_ffn.4." in name:
        name = name.replace('''.pre_norm_ffn.4.''' , '''.output.dense.''' )
    if ".transformer." in name:
        name = name.replace('''.transformer.''' , '''.transformer.layer.''' )
    if ".aspp_layer." in name:
        name = name.replace('''.aspp_layer.''' , '''.''' )
    if ".aspp_pool." in name:
        name = name.replace('''.aspp_pool.''' , '''.''' )
    if "seg_head." in name:
        name = name.replace('''seg_head.''' , '''segmentation_head.''' )
    if "segmentation_head.classifier.classifier." in name:
        name = name.replace('''segmentation_head.classifier.classifier.''' , '''segmentation_head.classifier.''' )
    if "classifier.fc." in name:
        name = name.replace('''classifier.fc.''' , '''classifier.''' )
    elif (not base_model) and ("segmentation_head." not in name):
        name = '''mobilevit.''' + name
    return name
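# Illustration (hypothetical checkpoint key, not from the original weights):
#   rename_key("conv_1.block.norm.weight")
# "conv_1." -> "conv_stem.", ".block." is dropped, ".norm." -> ".normalization.",
# and the "mobilevit." prefix is added, yielding
#   "mobilevit.conv_stem.normalization.weight"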
def convert_state_dict(orig_state_dict , model , base_model=False ) -> Union[str, Any]:
    """simple docstring"""
    if base_model:
        model_prefix = ''''''
    else:
        model_prefix = '''mobilevit.'''
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if key[:8] == "encoder.":
            key = key[8:]
        if "qkv" in key:
            key_split = key.split('''.''' )
            layer_num = int(key_split[0][6:] ) - 1
            transformer_num = int(key_split[3] )
            layer = model.get_submodule(f"""{model_prefix}encoder.layer.{layer_num}""" )
            dim = layer.transformer.layer[transformer_num].attention.attention.all_head_size
            prefix = (
                f"""{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention."""
            )
            if "weight" in key:
                orig_state_dict[prefix + '''query.weight'''] = val[:dim, :]
                orig_state_dict[prefix + '''key.weight'''] = val[dim : dim * 2, :]
                orig_state_dict[prefix + '''value.weight'''] = val[-dim:, :]
            else:
                orig_state_dict[prefix + '''query.bias'''] = val[:dim]
                orig_state_dict[prefix + '''key.bias'''] = val[dim : dim * 2]
                orig_state_dict[prefix + '''value.bias'''] = val[-dim:]
        else:
            orig_state_dict[rename_key(key , base_model )] = val
    return orig_state_dict
def prepare_img( ) -> List[Any]:
    """simple docstring"""
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_movilevit_checkpoint(mobilevit_name , checkpoint_path , pytorch_dump_folder_path , push_to_hub=False ) -> Optional[Any]:
    """simple docstring"""
    config = get_mobilevit_config(mobilevit_name )
# load original state_dict
    state_dict = torch.load(checkpoint_path , map_location='''cpu''' )
# load 🤗 model
if mobilevit_name.startswith('''deeplabv3_''' ):
        model = MobileViTForSemanticSegmentation(config ).eval()
else:
        model = MobileViTForImageClassification(config ).eval()
    new_state_dict = convert_state_dict(state_dict , model )
    model.load_state_dict(new_state_dict )
# Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 )
    encoding = image_processor(images=prepare_img() , return_tensors='''pt''' )
    outputs = model(**encoding )
    logits = outputs.logits
if mobilevit_name.startswith('''deeplabv3_''' ):
assert logits.shape == (1, 21, 32, 32)
if mobilevit_name == "deeplabv3_mobilevit_s":
        expected_logits = torch.tensor(
[
[[6.20_65, 6.12_92, 6.20_70], [6.10_79, 6.12_54, 6.17_47], [6.00_42, 6.10_71, 6.10_34]],
[[-6.92_53, -6.86_53, -7.03_98], [-7.32_18, -7.39_83, -7.36_70], [-7.19_61, -7.24_82, -7.15_69]],
[[-4.47_23, -4.43_48, -4.37_69], [-5.36_29, -5.46_32, -5.45_98], [-5.15_87, -5.34_02, -5.50_59]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xs":
        expected_logits = torch.tensor(
[
[[5.44_49, 5.57_33, 5.63_14], [5.18_15, 5.39_30, 5.59_63], [5.16_56, 5.43_33, 5.48_53]],
[[-9.44_23, -9.77_66, -9.67_14], [-9.15_81, -9.57_20, -9.55_19], [-9.10_06, -9.64_58, -9.57_03]],
[[-7.77_21, -7.37_16, -7.15_83], [-8.45_99, -8.06_24, -7.79_44], [-8.41_72, -7.83_66, -7.50_25]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xxs":
        expected_logits = torch.tensor(
[
[[6.98_11, 6.97_43, 7.31_23], [7.17_77, 7.19_31, 7.39_38], [7.56_33, 7.80_50, 7.89_01]],
[[-10.55_36, -10.23_32, -10.29_24], [-10.23_36, -9.86_24, -9.59_64], [-10.88_40, -10.81_58, -10.66_59]],
[[-3.49_38, -3.06_31, -2.86_20], [-3.42_05, -2.81_35, -2.68_75], [-3.41_79, -2.79_45, -2.87_50]],
] )
else:
raise ValueError(f"""Unknown mobilevit_name: {mobilevit_name}""" )
    assert torch.allclose(logits[0, :3, :3, :3] , expected_logits , atol=1E-4 )
else:
assert logits.shape == (1, 1_000)
if mobilevit_name == "mobilevit_s":
        expected_logits = torch.tensor([-0.98_66, 0.23_92, -1.12_41] )
elif mobilevit_name == "mobilevit_xs":
        expected_logits = torch.tensor([-2.47_61, -0.93_99, -1.95_87] )
elif mobilevit_name == "mobilevit_xxs":
        expected_logits = torch.tensor([-1.93_64, -1.23_27, -0.46_53] )
else:
raise ValueError(f"""Unknown mobilevit_name: {mobilevit_name}""" )
    assert torch.allclose(logits[0, :3] , expected_logits , atol=1E-4 )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(f"""Saving model {mobilevit_name} to {pytorch_dump_folder_path}""" )
    model.save_pretrained(pytorch_dump_folder_path )
    print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if push_to_hub:
        model_mapping = {
'''mobilevit_s''': '''mobilevit-small''',
'''mobilevit_xs''': '''mobilevit-x-small''',
'''mobilevit_xxs''': '''mobilevit-xx-small''',
'''deeplabv3_mobilevit_s''': '''deeplabv3-mobilevit-small''',
'''deeplabv3_mobilevit_xs''': '''deeplabv3-mobilevit-x-small''',
'''deeplabv3_mobilevit_xxs''': '''deeplabv3-mobilevit-xx-small''',
}
print('''Pushing to the hub...''' )
        model_name = model_mapping[mobilevit_name]
        image_processor.push_to_hub(model_name , organization='''apple''' )
        model.push_to_hub(model_name , organization='''apple''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--mobilevit_name""",
default="""mobilevit_s""",
type=str,
help=(
"""Name of the MobileViT model you'd like to convert. Should be one of 'mobilevit_s', 'mobilevit_xs',"""
""" 'mobilevit_xxs', 'deeplabv3_mobilevit_s', 'deeplabv3_mobilevit_xs', 'deeplabv3_mobilevit_xxs'."""
),
)
parser.add_argument(
"""--checkpoint_path""", required=True, type=str, help="""Path to the original state dict (.pt file)."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
    args = parser.parse_args()
convert_movilevit_checkpoint(
args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
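# Example invocation (hypothetical paths, not part of the original script):
#   python convert_mobilevit.py --mobilevit_name mobilevit_s \
#       --checkpoint_path ./mobilevit_s.pt --pytorch_dump_folder_path ./mobilevit-small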
| 14 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"""configuration_blip""": [
"""BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BlipConfig""",
"""BlipTextConfig""",
"""BlipVisionConfig""",
],
"""processing_blip""": ["""BlipProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""image_processing_blip"""] = ["""BlipImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_blip"""] = [
"""BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BlipModel""",
"""BlipPreTrainedModel""",
"""BlipForConditionalGeneration""",
"""BlipForQuestionAnswering""",
"""BlipVisionModel""",
"""BlipTextModel""",
"""BlipForImageTextRetrieval""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_tf_blip"""] = [
"""TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFBlipModel""",
"""TFBlipPreTrainedModel""",
"""TFBlipForConditionalGeneration""",
"""TFBlipForQuestionAnswering""",
"""TFBlipVisionModel""",
"""TFBlipTextModel""",
"""TFBlipForImageTextRetrieval""",
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 14 | 1 |
from ...processing_utils import ProcessorMixin
class SpeechT5Processor( ProcessorMixin ):
    '''simple docstring'''
    feature_extractor_class = '''SpeechT5FeatureExtractor'''
    tokenizer_class = '''SpeechT5Tokenizer'''
    def __init__( self , feature_extractor , tokenizer) ->None:
        '''simple docstring'''
        super().__init__(feature_extractor , tokenizer)
    def __call__( self , *args , **kwargs) ->Optional[Any]:
        '''simple docstring'''
        audio = kwargs.pop('''audio''' , None)
        text = kwargs.pop('''text''' , None)
        text_target = kwargs.pop('''text_target''' , None)
        audio_target = kwargs.pop('''audio_target''' , None)
        sampling_rate = kwargs.pop('''sampling_rate''' , None)
        if audio is not None and text is not None:
            raise ValueError(
                '''Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?''')
        if audio_target is not None and text_target is not None:
            raise ValueError(
                '''Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?''')
        if audio is None and audio_target is None and text is None and text_target is None:
            raise ValueError(
                '''You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process.''')
        if audio is not None:
            inputs = self.feature_extractor(audio , *args , sampling_rate=sampling_rate , **kwargs)
        elif text is not None:
            inputs = self.tokenizer(text , **kwargs)
        else:
            inputs = None
        if audio_target is not None:
            targets = self.feature_extractor(audio_target=audio_target , *args , sampling_rate=sampling_rate , **kwargs)
            labels = targets['''input_values''']
        elif text_target is not None:
            targets = self.tokenizer(text_target , **kwargs)
            labels = targets['''input_ids''']
        else:
            targets = None
        if inputs is None:
            return targets
        if targets is not None:
            inputs['''labels'''] = labels
            decoder_attention_mask = targets.get('''attention_mask''')
            if decoder_attention_mask is not None:
                inputs['''decoder_attention_mask'''] = decoder_attention_mask
        return inputs
    def pad( self , *args , **kwargs) ->Optional[int]:
        '''simple docstring'''
        input_values = kwargs.pop('''input_values''' , None)
        input_ids = kwargs.pop('''input_ids''' , None)
        labels = kwargs.pop('''labels''' , None)
        if input_values is not None and input_ids is not None:
            raise ValueError('''Cannot process both `input_values` and `input_ids` inputs.''')
        if input_values is None and input_ids is None and labels is None:
            raise ValueError(
                '''You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded.''')
        if input_values is not None:
            inputs = self.feature_extractor.pad(input_values , *args , **kwargs)
        elif input_ids is not None:
            inputs = self.tokenizer.pad(input_ids , **kwargs)
        else:
            inputs = None
        if labels is not None:
            if "input_ids" in labels or (isinstance(labels , list) and "input_ids" in labels[0]):
                targets = self.tokenizer.pad(labels , **kwargs)
                labels = targets['''input_ids''']
            else:
                feature_size_hack = self.feature_extractor.feature_size
                self.feature_extractor.feature_size = self.feature_extractor.num_mel_bins
                targets = self.feature_extractor.pad(labels , *args , **kwargs)
                self.feature_extractor.feature_size = feature_size_hack
                labels = targets['''input_values''']
        else:
            targets = None
        if inputs is None:
            return targets
        if targets is not None:
            inputs['''labels'''] = labels
            decoder_attention_mask = targets.get('''attention_mask''')
            if decoder_attention_mask is not None:
                inputs['''decoder_attention_mask'''] = decoder_attention_mask
        return inputs
    def batch_decode( self , *args , **kwargs) ->Optional[Any]:
        '''simple docstring'''
        return self.tokenizer.batch_decode(*args , **kwargs)
    def decode( self , *args , **kwargs) ->Dict:
        '''simple docstring'''
        return self.tokenizer.decode(*args , **kwargs)
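# A minimal usage sketch (hedged: the checkpoint name is assumed, not part of this file):
#   processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
#   inputs = processor(text="Hello, world!", return_tensors="pt")
#   # inputs now carries "input_ids" suitable for a SpeechT5 text-to-speech model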
| 14 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"""configuration_vit_msn""": ["""VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ViTMSNConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_vit_msn"""] = [
"""VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ViTMSNModel""",
"""ViTMSNForImageClassification""",
"""ViTMSNPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 14 | 1 |
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def unwrap_schedule(scheduler , num_steps=10 ) -> int:
    """simple docstring"""
    lrs = []
    for _ in range(num_steps ):
        lrs.append(scheduler.get_lr()[0] )
        scheduler.step()
    return lrs
def unwrap_and_save_reload_schedule(scheduler , num_steps=10 ) -> List[str]:
    """simple docstring"""
    lrs = []
    for step in range(num_steps ):
        lrs.append(scheduler.get_lr()[0] )
        scheduler.step()
        if step == num_steps // 2:
            with tempfile.TemporaryDirectory() as tmpdirname:
                file_name = os.path.join(tmpdirname , '''schedule.bin''' )
                torch.save(scheduler.state_dict() , file_name )
                state_dict = torch.load(file_name )
                scheduler.load_state_dict(state_dict )
    return lrs
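# Typical use of these helpers (hedged sketch): record the LR curve of a schedule.
#   optimizer = AdamW(nn.Linear(50, 50).parameters(), lr=10.0)
#   scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=2, num_training_steps=10)
#   lrs = unwrap_schedule(scheduler, num_steps=10)  # [0.0, 5.0, 10.0, 8.75, ...]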
@require_torch
class UpperCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
    def assertListAlmostEqual( self , list1 , list2 , tol) ->None:
        '''simple docstring'''
        self.assertEqual(len(list1) , len(list2))
        for a, b in zip(list1 , list2):
            self.assertAlmostEqual(a , b , delta=tol)
    def test_adam_w( self : Optional[Any]) ->List[Any]:
        '''simple docstring'''
        w = torch.tensor([0.1, -0.2, -0.1] , requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = AdamW(params=[w] , lr=2e-1 , weight_decay=0.0)
        for _ in range(100):
            loss = criterion(w , target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2)
    def test_adafactor( self : str) ->Tuple:
        '''simple docstring'''
        w = torch.tensor([0.1, -0.2, -0.1] , requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = Adafactor(
            params=[w] , lr=1e-2 , eps=(1e-30, 1e-3) , clip_threshold=1.0 , decay_rate=-0.8 , beta1=None , weight_decay=0.0 , relative_step=False , scale_parameter=False , warmup_init=False , )
        for _ in range(1_000):
            loss = criterion(w , target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2)
@require_torch
class UpperCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
    m = nn.Linear(50 , 50 ) if is_torch_available() else None
    optimizer = AdamW(m.parameters() , lr=10.0 ) if is_torch_available() else None
    num_steps = 10
    def assertListAlmostEqual( self , list1 , list2 , tol , msg=None) ->None:
        '''simple docstring'''
        self.assertEqual(len(list1) , len(list2))
        for a, b in zip(list1 , list2):
            self.assertAlmostEqual(a , b , delta=tol , msg=msg)
    def test_schedulers( self : List[str]) ->Optional[Any]:
'''simple docstring'''
        common_kwargs = {'''num_warmup_steps''': 2, '''num_training_steps''': 10}
# schedulers doct format
# function: (sched_args_dict, expected_learning_rates)
        scheds = {
get_constant_schedule: ({}, [10.0] * self.num_steps),
get_constant_schedule_with_warmup: (
{'''num_warmup_steps''': 4},
[0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
),
get_linear_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
),
get_cosine_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
),
get_cosine_with_hard_restarts_schedule_with_warmup: (
{**common_kwargs, '''num_cycles''': 2},
[0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
),
get_polynomial_decay_schedule_with_warmup: (
{**common_kwargs, '''power''': 2.0, '''lr_end''': 1e-7},
[0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
),
get_inverse_sqrt_schedule: (
{'''num_warmup_steps''': 2},
[0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
),
}
        for scheduler_func, data in scheds.items():
            kwargs , expected_learning_rates = data
            scheduler = scheduler_func(self.optimizer , **kwargs)
            self.assertEqual(len([scheduler.get_lr()[0]]) , 1)
            lrs_1 = unwrap_schedule(scheduler , self.num_steps)
            self.assertListAlmostEqual(
                lrs_1 , expected_learning_rates , tol=1e-2 , msg=f"""failed for {scheduler_func} in normal scheduler""" , )
            scheduler = scheduler_func(self.optimizer , **kwargs)
            if scheduler_func.__name__ != "get_constant_schedule":
                LambdaScheduleWrapper.wrap_scheduler(scheduler)  # wrap to test picklability of the schedule
            lrs_2 = unwrap_and_save_reload_schedule(scheduler , self.num_steps)
            self.assertListEqual(lrs_1 , lrs_2 , msg=f"""failed for {scheduler_func} in save and reload""")
class LambdaScheduleWrapper:
    '''simple docstring'''
    def __init__( self , fn) ->None:
        '''simple docstring'''
        self.fn = fn
    def __call__( self , *args , **kwargs) ->List[str]:
        '''simple docstring'''
        return self.fn(*args , **kwargs)
    @classmethod
    def wrap_scheduler( self , scheduler) ->None:
        '''simple docstring'''
        scheduler.lr_lambdas = list(map(self , scheduler.lr_lambdas))
| 14 |
def SCREAMING_SNAKE_CASE ( lowercase_ ) -> "list[int]":
"""simple docstring"""
if upper_limit < 0:
raise ValueError('''Limit for the Catalan sequence must be ≥ 0''' )
A__ = [0] * (upper_limit + 1)
# Base case: C(0) = C(1) = 1
A__ = 1
if upper_limit > 0:
A__ = 1
# Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
for i in range(2 , upper_limit + 1 ):
for j in range(lowercase_ ):
catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]
return catalan_list
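# Sanity check (the first Catalan numbers are 1, 1, 2, 5, 14, 42):
#   catalan_numbers(5)  ->  [1, 1, 2, 5, 14, 42]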
if __name__ == "__main__":
print("""\n********* Catalan Numbers Using Dynamic Programming ************\n""")
print("""\n*** Enter -1 at any time to quit ***""")
print("""\nEnter the upper limit (≥ 0) for the Catalan number sequence: """, end="""""")
try:
while True:
            N = int(input().strip())
if N < 0:
print("""\n********* Goodbye!! ************""")
break
else:
print(F'''The Catalan numbers from 0 through {N} are:''')
print(catalan_numbers(N))
print("""Try another upper limit for the sequence: """, end="""""")
except (NameError, ValueError):
print("""\n********* Invalid input, goodbye! ************\n""")
import doctest
doctest.testmod()
| 14 | 1 |
import warnings
from diffusers import StableDiffusionImgaImgPipeline # noqa F401
warnings.warn(
"""The `image_to_image.py` script is outdated. Please use directly `from diffusers import"""
""" StableDiffusionImg2ImgPipeline` instead."""
)
| 14 |
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def main(args ) -> Dict:
    """simple docstring"""
    pruning_method = args.pruning_method
    threshold = args.threshold
    model_name_or_path = args.model_name_or_path.rstrip('''/''' )
    target_model_path = args.target_model_path
    print(f"""Load fine-pruned model from {model_name_or_path}""" )
    model = torch.load(os.path.join(model_name_or_path , '''pytorch_model.bin''' ) )
    pruned_model = {}
for name, tensor in model.items():
if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
            pruned_model[name] = tensor
print(f"""Copied layer {name}""" )
elif "classifier" in name or "qa_output" in name:
            pruned_model[name] = tensor
print(f"""Copied layer {name}""" )
elif "bias" in name:
            pruned_model[name] = tensor
print(f"""Copied layer {name}""" )
else:
if pruning_method == "magnitude":
A__ = MagnitudeBinarizer.apply(inputs=lowercase_ , threshold=lowercase_ )
A__ = tensor * mask
print(f"""Pruned layer {name}""" )
elif pruning_method == "topK":
if "mask_scores" in name:
continue
A__ = name[:-6]
A__ = model[f"""{prefix_}mask_scores"""]
A__ = TopKBinarizer.apply(lowercase_ , lowercase_ )
A__ = tensor * mask
print(f"""Pruned layer {name}""" )
elif pruning_method == "sigmoied_threshold":
if "mask_scores" in name:
continue
A__ = name[:-6]
A__ = model[f"""{prefix_}mask_scores"""]
A__ = ThresholdBinarizer.apply(lowercase_ , lowercase_ , lowercase_ )
A__ = tensor * mask
print(f"""Pruned layer {name}""" )
elif pruning_method == "l0":
if "mask_scores" in name:
continue
A__ = name[:-6]
A__ = model[f"""{prefix_}mask_scores"""]
A__ , A__ = -0.1, 1.1
A__ = torch.sigmoid(lowercase_ )
A__ = s * (r - l) + l
A__ = s_bar.clamp(min=0.0 , max=1.0 )
A__ = tensor * mask
print(f"""Pruned layer {name}""" )
else:
raise ValueError('''Unknown pruning method''' )
if target_model_path is None:
        target_model_path = os.path.join(
            os.path.dirname(model_name_or_path ) , f"""bertarized_{os.path.basename(model_name_or_path )}""" )
    if not os.path.isdir(target_model_path ):
        shutil.copytree(model_name_or_path , target_model_path )
        print(f"""\nCreated folder {target_model_path}""" )
    torch.save(pruned_model , os.path.join(target_model_path , '''pytorch_model.bin''' ) )
print('''\nPruned model saved! See you later!''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--pruning_method""",
choices=["""l0""", """magnitude""", """topK""", """sigmoied_threshold"""],
type=str,
required=True,
help=(
"""Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,"""
""" sigmoied_threshold = Soft movement pruning)"""
),
)
parser.add_argument(
"""--threshold""",
type=float,
required=False,
help=(
"""For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model."""
"""For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared."""
"""Not needed for `l0`"""
),
)
parser.add_argument(
"""--model_name_or_path""",
type=str,
required=True,
help="""Folder containing the model that was previously fine-pruned""",
)
parser.add_argument(
"""--target_model_path""",
default=None,
type=str,
required=False,
help="""Folder containing the model that was previously fine-pruned""",
)
    args = parser.parse_args()
main(args)
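# Example invocation (hypothetical paths, not part of the original script):
#   python bertarize.py --pruning_method topK --threshold 0.10 \
#       --model_name_or_path ./serialization_dir/fine_pruned_model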
| 14 | 1 |
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""deepmind/language-perceiver""": """https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json""",
# See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class PerceiverConfig( PretrainedConfig ):
'''simple docstring'''
    model_type = '''perceiver'''
    def __init__( self , num_latents=256 , d_latents=1_280 , d_model=768 , num_blocks=1 , num_self_attends_per_block=26 , num_self_attention_heads=8 , num_cross_attention_heads=8 , qk_channels=None , v_channels=None , cross_attention_shape_for_attention="kv" , self_attention_widening_factor=1 , cross_attention_widening_factor=1 , hidden_act="gelu" , attention_probs_dropout_prob=0.1 , initializer_range=0.02 , layer_norm_eps=1e-12 , use_query_residual=True , vocab_size=262 , max_position_embeddings=2_048 , image_size=56 , train_size=[368, 496] , num_frames=16 , audio_samples_per_frame=1_920 , samples_per_patch=16 , output_shape=[1, 16, 224, 224] , **kwargs , ) ->None:
'''simple docstring'''
        super().__init__(**kwargs)
        self.num_latents = num_latents
        self.d_latents = d_latents
        self.d_model = d_model
        self.num_blocks = num_blocks
        self.num_self_attends_per_block = num_self_attends_per_block
        self.num_self_attention_heads = num_self_attention_heads
        self.num_cross_attention_heads = num_cross_attention_heads
        self.qk_channels = qk_channels
        self.v_channels = v_channels
        self.cross_attention_shape_for_attention = cross_attention_shape_for_attention
        self.self_attention_widening_factor = self_attention_widening_factor
        self.cross_attention_widening_factor = cross_attention_widening_factor
        self.hidden_act = hidden_act
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_query_residual = use_query_residual
        # masked language modeling attributes
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        # image classification attributes
        self.image_size = image_size
        # flow attributes
        self.train_size = train_size
        # multimodal autoencoding attributes
        self.num_frames = num_frames
        self.audio_samples_per_frame = audio_samples_per_frame
        self.samples_per_patch = samples_per_patch
        self.output_shape = output_shape
class UpperCamelCase_ ( UpperCAmelCase__ ):
'''simple docstring'''
@property
def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task == "multiple-choice":
A__ = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
A__ = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''inputs''', dynamic_axis),
('''attention_mask''', dynamic_axis),
])
@property
def SCREAMING_SNAKE_CASE ( self : str) ->float:
'''simple docstring'''
return 1e-4
def SCREAMING_SNAKE_CASE ( self : int , UpperCAmelCase__ : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , UpperCAmelCase__ : int = -1 , UpperCAmelCase__ : int = -1 , UpperCAmelCase__ : int = -1 , UpperCAmelCase__ : bool = False , UpperCAmelCase__ : Optional[TensorType] = None , UpperCAmelCase__ : int = 3 , UpperCAmelCase__ : int = 40 , UpperCAmelCase__ : int = 40 , ) ->Mapping[str, Any]:
'''simple docstring'''
if isinstance(UpperCAmelCase__ , UpperCAmelCase__):
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
A__ = compute_effective_axis_dimension(
UpperCAmelCase__ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0)
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
A__ = preprocessor.num_special_tokens_to_add(UpperCAmelCase__)
A__ = compute_effective_axis_dimension(
UpperCAmelCase__ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=UpperCAmelCase__)
# Generate dummy inputs according to compute batch and sequence
A__ = [''' '''.join(['''a''']) * seq_length] * batch_size
A__ = dict(preprocessor(UpperCAmelCase__ , return_tensors=UpperCAmelCase__))
A__ = inputs.pop('''input_ids''')
return inputs
elif isinstance(UpperCAmelCase__ , UpperCAmelCase__) and preprocessor.model_input_names[0] == "pixel_values":
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
A__ = compute_effective_axis_dimension(UpperCAmelCase__ , fixed_dimension=OnnxConfig.default_fixed_batch)
A__ = self._generate_dummy_images(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__)
A__ = dict(preprocessor(images=UpperCAmelCase__ , return_tensors=UpperCAmelCase__))
A__ = inputs.pop('''pixel_values''')
return inputs
else:
raise ValueError(
'''Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor.''')
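# Hedged usage sketch (not part of the original module): instantiating the config with a
# smaller latent array, mirroring the attributes set in `__init__` above. The chosen
# values are illustrative.
from transformers import PerceiverConfig

config = PerceiverConfig(num_latents=128, d_latents=640)
assert config.num_latents == 128 and config.d_latents == 640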
| 14 |
_lowerCamelCase : Optional[int] = 65521
def SCREAMING_SNAKE_CASE ( lowercase_ ) -> int:
"""simple docstring"""
A__ = 1
A__ = 0
for plain_chr in plain_text:
A__ = (a + ord(lowercase_ )) % MOD_ADLER
A__ = (b + a) % MOD_ADLER
return (b << 16) | a
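# A quick sanity check (a sketch, not part of the original snippet): the function above
# implements Adler-32, so its output should match `zlib.adler32` for ASCII input.
# `adler_32` below stands in for the obfuscated function name.
import zlib

def adler_32(plain_text: str) -> int:
    MOD_ADLER = 65521
    a, b = 1, 0
    for plain_chr in plain_text:
        a = (a + ord(plain_chr)) % MOD_ADLER
        b = (b + a) % MOD_ADLER
    return (b << 16) | a

assert adler_32("Wikipedia") == zlib.adler32(b"Wikipedia") == 300286872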
| 14 | 1 |
from __future__ import annotations
def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> float:
"""simple docstring"""
A__ = sorted(numsa + numsa )
A__ , A__ = divmod(len(lowercase_ ) , 2 )
if mod == 1:
return all_numbers[div]
else:
return (all_numbers[div] + all_numbers[div - 1]) / 2
if __name__ == "__main__":
import doctest
doctest.testmod()
_lowerCamelCase : Tuple = [float(x) for x in input("""Enter the elements of first array: """).split()]
_lowerCamelCase : List[Any] = [float(x) for x in input("""Enter the elements of second array: """).split()]
print(F'''The median of two arrays is: {median_of_two_arrays(array_a, array_a)}''')
| 14 |
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
_lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
_lowerCamelCase : Tuple = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
_lowerCamelCase : Union[str, Any] = {
"""vocab_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
_lowerCamelCase : str = {
"""vocab_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
_lowerCamelCase : str = {
"""vocab_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"""
),
},
}
_lowerCamelCase : Any = {
"""facebook/dpr-ctx_encoder-single-nq-base""": 512,
"""facebook/dpr-ctx_encoder-multiset-base""": 512,
}
_lowerCamelCase : List[str] = {
"""facebook/dpr-question_encoder-single-nq-base""": 512,
"""facebook/dpr-question_encoder-multiset-base""": 512,
}
_lowerCamelCase : Tuple = {
"""facebook/dpr-reader-single-nq-base""": 512,
"""facebook/dpr-reader-multiset-base""": 512,
}
_lowerCamelCase : Optional[Any] = {
"""facebook/dpr-ctx_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-ctx_encoder-multiset-base""": {"""do_lower_case""": True},
}
_lowerCamelCase : Optional[int] = {
"""facebook/dpr-question_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-question_encoder-multiset-base""": {"""do_lower_case""": True},
}
_lowerCamelCase : Optional[Any] = {
"""facebook/dpr-reader-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-reader-multiset-base""": {"""do_lower_case""": True},
}
class UpperCamelCase_ ( UpperCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase__ = VOCAB_FILES_NAMES
UpperCAmelCase__ = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase__ = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase__ = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
UpperCAmelCase__ = DPRContextEncoderTokenizer
class UpperCamelCase_ ( UpperCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase__ = VOCAB_FILES_NAMES
UpperCAmelCase__ = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase__ = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase__ = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
UpperCAmelCase__ = DPRQuestionEncoderTokenizer
_lowerCamelCase : int = collections.namedtuple(
"""DPRSpanPrediction""", ["""span_score""", """relevance_score""", """doc_id""", """start_index""", """end_index""", """text"""]
)
_lowerCamelCase : Any = collections.namedtuple("""DPRReaderOutput""", ["""start_logits""", """end_logits""", """relevance_logits"""])
_lowerCamelCase : Dict = r"""
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
- `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
            is provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
- `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
of pairs) is provided.
- `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the first
sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the
second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `'tf'`: Return TensorFlow `tf.constant` objects.
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
Whether or not to return the attention mask. If not set, will return the attention mask according to the
specific tokenizer's default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Return:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
"""
@add_start_docstrings(UpperCAmelCase__ )
class UpperCamelCase_ :
'''simple docstring'''
def __call__( self : Optional[int] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Optional[str] = None , UpperCAmelCase__ : Optional[str] = None , UpperCAmelCase__ : Union[bool, str] = False , UpperCAmelCase__ : Union[bool, str] = False , UpperCAmelCase__ : Optional[int] = None , UpperCAmelCase__ : Optional[Union[str, TensorType]] = None , UpperCAmelCase__ : Optional[bool] = None , **UpperCAmelCase__ : Optional[int] , ) ->BatchEncoding:
'''simple docstring'''
if titles is None and texts is None:
return super().__call__(
UpperCAmelCase__ , padding=UpperCAmelCase__ , truncation=UpperCAmelCase__ , max_length=UpperCAmelCase__ , return_tensors=UpperCAmelCase__ , return_attention_mask=UpperCAmelCase__ , **UpperCAmelCase__ , )
elif titles is None or texts is None:
A__ = titles if texts is None else texts
return super().__call__(
UpperCAmelCase__ , UpperCAmelCase__ , padding=UpperCAmelCase__ , truncation=UpperCAmelCase__ , max_length=UpperCAmelCase__ , return_tensors=UpperCAmelCase__ , return_attention_mask=UpperCAmelCase__ , **UpperCAmelCase__ , )
A__ = titles if not isinstance(UpperCAmelCase__ , UpperCAmelCase__) else [titles]
A__ = texts if not isinstance(UpperCAmelCase__ , UpperCAmelCase__) else [texts]
A__ = len(UpperCAmelCase__)
A__ = questions if not isinstance(UpperCAmelCase__ , UpperCAmelCase__) else [questions] * n_passages
assert len(UpperCAmelCase__) == len(
UpperCAmelCase__), f"""There should be as many titles than texts but got {len(UpperCAmelCase__)} titles and {len(UpperCAmelCase__)} texts."""
A__ = super().__call__(UpperCAmelCase__ , UpperCAmelCase__ , padding=UpperCAmelCase__ , truncation=UpperCAmelCase__)['''input_ids''']
A__ = super().__call__(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ , padding=UpperCAmelCase__ , truncation=UpperCAmelCase__)['''input_ids''']
A__ = {
'''input_ids''': [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(UpperCAmelCase__ , UpperCAmelCase__)
]
}
if return_attention_mask is not False:
A__ = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
A__ = attention_mask
return self.pad(UpperCAmelCase__ , padding=UpperCAmelCase__ , max_length=UpperCAmelCase__ , return_tensors=UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : List[str] , UpperCAmelCase__ : BatchEncoding , UpperCAmelCase__ : DPRReaderOutput , UpperCAmelCase__ : int = 16 , UpperCAmelCase__ : int = 64 , UpperCAmelCase__ : int = 4 , ) ->List[DPRSpanPrediction]:
'''simple docstring'''
A__ = reader_input['''input_ids''']
A__ , A__ , A__ = reader_output[:3]
A__ = len(UpperCAmelCase__)
A__ = sorted(range(UpperCAmelCase__) , reverse=UpperCAmelCase__ , key=relevance_logits.__getitem__)
A__ = []
for doc_id in sorted_docs:
A__ = list(input_ids[doc_id])
# assuming question & title information is at the beginning of the sequence
A__ = sequence_ids.index(self.sep_token_id , 2) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
A__ = sequence_ids.index(self.pad_token_id)
else:
A__ = len(UpperCAmelCase__)
A__ = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=UpperCAmelCase__ , top_spans=UpperCAmelCase__ , )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=UpperCAmelCase__ , start_index=UpperCAmelCase__ , end_index=UpperCAmelCase__ , text=self.decode(sequence_ids[start_index : end_index + 1]) , ))
if len(UpperCAmelCase__) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def SCREAMING_SNAKE_CASE ( self : Any , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : int , UpperCAmelCase__ : int , ) ->List[DPRSpanPrediction]:
'''simple docstring'''
A__ = []
for start_index, start_score in enumerate(UpperCAmelCase__):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
scores.append(((start_index, start_index + answer_length), start_score + end_score))
A__ = sorted(UpperCAmelCase__ , key=lambda UpperCAmelCase__: x[1] , reverse=UpperCAmelCase__)
A__ = []
for (start_index, end_index), score in scores:
assert start_index <= end_index, f"""Wrong span indices: [{start_index}:{end_index}]"""
A__ = end_index - start_index + 1
assert length <= max_answer_length, f"""Span is too long: {length} > {max_answer_length}"""
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals):
continue
chosen_span_intervals.append((start_index, end_index))
if len(UpperCAmelCase__) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(UpperCAmelCase__ )
class UpperCamelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase__ = VOCAB_FILES_NAMES
UpperCAmelCase__ = READER_PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase__ = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase__ = READER_PRETRAINED_INIT_CONFIGURATION
UpperCAmelCase__ = ['''input_ids''', '''attention_mask''']
UpperCAmelCase__ = DPRReaderTokenizer
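# Hedged usage sketch (not part of the original module): encoding reader inputs and
# extracting the best answer span with the fast DPR reader tokenizer. The checkpoint
# name and example strings are illustrative.
import torch
from transformers import DPRReader, DPRReaderTokenizerFast

tokenizer = DPRReaderTokenizerFast.from_pretrained("facebook/dpr-reader-single-nq-base")
model = DPRReader.from_pretrained("facebook/dpr-reader-single-nq-base")
encoded = tokenizer(
    questions="What is love?",
    titles="Haddaway",
    texts="'What Is Love' is a song recorded by the artist Haddaway",
    return_tensors="pt",
)
with torch.no_grad():
    outputs = model(**encoded)
best_spans = tokenizer.decode_best_spans(encoded, outputs, num_spans=1)
print(best_spans[0].text)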
| 14 | 1 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase : Tuple = logging.get_logger(__name__)
_lowerCamelCase : str = {
"""microsoft/git-base""": """https://huggingface.co/microsoft/git-base/resolve/main/config.json""",
}
class UpperCamelCase_ ( UpperCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase__ = '''git_vision_model'''
def __init__( self : Any , UpperCAmelCase__ : Any=768 , UpperCAmelCase__ : int=3_072 , UpperCAmelCase__ : List[str]=12 , UpperCAmelCase__ : Dict=12 , UpperCAmelCase__ : Optional[int]=3 , UpperCAmelCase__ : List[Any]=224 , UpperCAmelCase__ : Union[str, Any]=16 , UpperCAmelCase__ : Union[str, Any]="quick_gelu" , UpperCAmelCase__ : Dict=1e-5 , UpperCAmelCase__ : Union[str, Any]=0.0 , UpperCAmelCase__ : Any=0.02 , **UpperCAmelCase__ : Any , ) ->Optional[int]:
'''simple docstring'''
super().__init__(**UpperCAmelCase__)
A__ = hidden_size
A__ = intermediate_size
A__ = num_hidden_layers
A__ = num_attention_heads
A__ = num_channels
A__ = patch_size
A__ = image_size
A__ = initializer_range
A__ = attention_dropout
A__ = layer_norm_eps
A__ = hidden_act
@classmethod
def SCREAMING_SNAKE_CASE ( cls : Any , UpperCAmelCase__ : Union[str, os.PathLike] , **UpperCAmelCase__ : int) ->"PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(UpperCAmelCase__)
A__ , A__ = cls.get_config_dict(UpperCAmelCase__ , **UpperCAmelCase__)
# get the vision config dict if we are loading from GITConfig
if config_dict.get('''model_type''') == "git":
A__ = config_dict['''vision_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''') and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""")
return cls.from_dict(UpperCAmelCase__ , **UpperCAmelCase__)
class UpperCamelCase_ ( UpperCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase__ = '''git'''
def __init__( self : Dict , UpperCAmelCase__ : Dict=None , UpperCAmelCase__ : int=30_522 , UpperCAmelCase__ : Optional[int]=768 , UpperCAmelCase__ : Dict=6 , UpperCAmelCase__ : int=12 , UpperCAmelCase__ : List[str]=3_072 , UpperCAmelCase__ : str="gelu" , UpperCAmelCase__ : int=0.1 , UpperCAmelCase__ : Union[str, Any]=0.1 , UpperCAmelCase__ : List[Any]=1_024 , UpperCAmelCase__ : List[str]=0.02 , UpperCAmelCase__ : Any=1e-12 , UpperCAmelCase__ : Union[str, Any]=0 , UpperCAmelCase__ : List[Any]="absolute" , UpperCAmelCase__ : int=True , UpperCAmelCase__ : Any=False , UpperCAmelCase__ : int=101 , UpperCAmelCase__ : Tuple=102 , UpperCAmelCase__ : Dict=None , **UpperCAmelCase__ : List[str] , ) ->Any:
'''simple docstring'''
super().__init__(bos_token_id=UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ , pad_token_id=UpperCAmelCase__ , **UpperCAmelCase__)
if vision_config is None:
A__ = {}
logger.info('''vision_config is None. initializing the GitVisionConfig with default values.''')
A__ = GitVisionConfig(**UpperCAmelCase__)
A__ = vocab_size
A__ = hidden_size
A__ = num_hidden_layers
A__ = num_attention_heads
A__ = hidden_act
A__ = intermediate_size
A__ = hidden_dropout_prob
A__ = attention_probs_dropout_prob
A__ = max_position_embeddings
A__ = initializer_range
A__ = layer_norm_eps
A__ = position_embedding_type
A__ = use_cache
A__ = tie_word_embeddings
A__ = num_image_with_embedding
A__ = bos_token_id
A__ = eos_token_id
def SCREAMING_SNAKE_CASE ( self : Any) ->List[Any]:
'''simple docstring'''
A__ = copy.deepcopy(self.__dict__)
A__ = self.vision_config.to_dict()
A__ = self.__class__.model_type
return output
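# Hedged usage sketch (not part of the original module): composing a GIT config with a
# custom vision tower, mirroring how `vision_config` is consumed in `__init__` above.
# The chosen hyperparameters are illustrative.
from transformers import GitConfig, GitVisionConfig

vision_config = GitVisionConfig(image_size=384, patch_size=16)
config = GitConfig(vision_config=vision_config.to_dict(), num_hidden_layers=6)
serialized = config.to_dict()  # round-trips the nested vision config, as in `to_dict` above
assert serialized["vision_config"]["image_size"] == 384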
| 14 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase : Any = logging.get_logger(__name__)
class UpperCamelCase_ ( UpperCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase__ = '''encoder-decoder'''
UpperCAmelCase__ = True
def __init__( self : List[str] , **UpperCAmelCase__ : Union[str, Any]) ->List[Any]:
'''simple docstring'''
super().__init__(**UpperCAmelCase__)
assert (
"encoder" in kwargs and "decoder" in kwargs
), "Config has to be initialized with encoder and decoder config"
A__ = kwargs.pop('''encoder''')
A__ = encoder_config.pop('''model_type''')
A__ = kwargs.pop('''decoder''')
A__ = decoder_config.pop('''model_type''')
from ..auto.configuration_auto import AutoConfig
A__ = AutoConfig.for_model(UpperCAmelCase__ , **UpperCAmelCase__)
A__ = AutoConfig.for_model(UpperCAmelCase__ , **UpperCAmelCase__)
A__ = True
@classmethod
def SCREAMING_SNAKE_CASE ( cls : Union[str, Any] , UpperCAmelCase__ : PretrainedConfig , UpperCAmelCase__ : PretrainedConfig , **UpperCAmelCase__ : Union[str, Any]) ->PretrainedConfig:
'''simple docstring'''
logger.info('''Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config''')
A__ = True
A__ = True
return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : str) ->Optional[Any]:
'''simple docstring'''
A__ = copy.deepcopy(self.__dict__)
A__ = self.encoder.to_dict()
A__ = self.decoder.to_dict()
A__ = self.__class__.model_type
return output
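# Hedged usage sketch (not part of the original module): building an encoder-decoder
# config from two existing configs via the classmethod defined above. Using two BERT
# configs is illustrative; any encoder/decoder pair works.
from transformers import BertConfig, EncoderDecoderConfig

encoder_config = BertConfig()
decoder_config = BertConfig()  # flipped to is_decoder=True with cross-attention below
config = EncoderDecoderConfig.from_encoder_decoder_configs(encoder_config, decoder_config)
assert config.decoder.is_decoder and config.decoder.add_cross_attention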
| 14 | 1 |
from collections import namedtuple
_lowerCamelCase : Tuple = namedtuple("""from_to""", """from_ to""")
_lowerCamelCase : Dict = {
"""cubicmeter""": from_to(1, 1),
"""litre""": from_to(0.001, 1000),
"""kilolitre""": from_to(1, 1),
"""gallon""": from_to(0.00_454, 264.172),
"""cubicyard""": from_to(0.76_455, 1.30_795),
"""cubicfoot""": from_to(0.028, 35.3_147),
"""cup""": from_to(0.000_236_588, 4_226.75),
}
def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ ) -> float:
"""simple docstring"""
if from_type not in METRIC_CONVERSION:
raise ValueError(
f"""Invalid 'from_type' value: {from_type!r} Supported values are:\n"""
+ ''', '''.join(lowercase_ ) )
if to_type not in METRIC_CONVERSION:
raise ValueError(
f"""Invalid 'to_type' value: {to_type!r}. Supported values are:\n"""
+ ''', '''.join(lowercase_ ) )
return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to
if __name__ == "__main__":
import doctest
doctest.testmod()
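# Worked example (a sketch, not part of the original snippet): `volume_conversion`
# stands in for the obfuscated function name above. Converting 4 cubic meters to
# litres multiplies by from_("cubicmeter") = 1 and to("litre") = 1000:
# volume_conversion(4, "cubicmeter", "litre")  ->  4 * 1 * 1000 = 4000.0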
| 14 |
def SCREAMING_SNAKE_CASE ( lowercase_ ) -> Any:
"""simple docstring"""
A__ = [0] * len(lowercase_ )
A__ = []
A__ = [1] * len(lowercase_ )
for values in graph.values():
for i in values:
indegree[i] += 1
for i in range(len(lowercase_ ) ):
if indegree[i] == 0:
queue.append(lowercase_ )
while queue:
A__ = queue.pop(0 )
for x in graph[vertex]:
indegree[x] -= 1
if long_dist[vertex] + 1 > long_dist[x]:
A__ = long_dist[vertex] + 1
if indegree[x] == 0:
queue.append(lowercase_ )
print(max(lowercase_ ) )
# Adjacency list of Graph
_lowerCamelCase : Optional[int] = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
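# For the adjacency list above the call prints 5: the longest path by vertex count is
# 0 -> 2 -> 5 -> 6 -> 7 (equivalently 1 -> 2 -> 5 -> 6 -> 7), since `long_dist` counts
# vertices rather than edges.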
| 14 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowerCamelCase : List[str] = {"""configuration_vit_msn""": ["""VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ViTMSNConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : List[Any] = [
"""VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ViTMSNModel""",
"""ViTMSNForImageClassification""",
"""ViTMSNPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
_lowerCamelCase : Tuple = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 14 |
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
_lowerCamelCase : Optional[Any] = datasets.utils.logging.get_logger(__name__)
@dataclass
class UpperCamelCase_ ( datasets.BuilderConfig ):
'''simple docstring'''
UpperCAmelCase__ = None
UpperCAmelCase__ = "utf-8"
UpperCAmelCase__ = None
UpperCAmelCase__ = None
UpperCAmelCase__ = True # deprecated
UpperCAmelCase__ = None # deprecated
UpperCAmelCase__ = 10 << 20 # 10MB
UpperCAmelCase__ = None
class UpperCamelCase_ ( datasets.ArrowBasedBuilder ):
'''simple docstring'''
UpperCAmelCase__ = JsonConfig
def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->str:
'''simple docstring'''
if self.config.block_size is not None:
logger.warning('''The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead''')
A__ = self.config.block_size
if self.config.use_threads is not True:
logger.warning(
'''The JSON loader parameter `use_threads` is deprecated and doesn\'t have any effect anymore.''')
if self.config.newlines_in_values is not None:
raise ValueError('''The JSON loader parameter `newlines_in_values` is no longer supported''')
return datasets.DatasetInfo(features=self.config.features)
def SCREAMING_SNAKE_CASE ( self : List[str] , UpperCAmelCase__ : List[Any]) ->Dict:
'''simple docstring'''
if not self.config.data_files:
raise ValueError(f"""At least one data file must be specified, but got data_files={self.config.data_files}""")
A__ = dl_manager.download_and_extract(self.config.data_files)
if isinstance(UpperCAmelCase__ , (str, list, tuple)):
A__ = data_files
if isinstance(UpperCAmelCase__ , UpperCAmelCase__):
A__ = [files]
A__ = [dl_manager.iter_files(UpperCAmelCase__) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files})]
A__ = []
for split_name, files in data_files.items():
if isinstance(UpperCAmelCase__ , UpperCAmelCase__):
A__ = [files]
A__ = [dl_manager.iter_files(UpperCAmelCase__) for file in files]
splits.append(datasets.SplitGenerator(name=UpperCAmelCase__ , gen_kwargs={'''files''': files}))
return splits
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , UpperCAmelCase__ : pa.Table) ->pa.Table:
'''simple docstring'''
if self.config.features is not None:
# adding missing columns
for column_name in set(self.config.features) - set(pa_table.column_names):
A__ = self.config.features.arrow_schema.field(UpperCAmelCase__).type
A__ = pa_table.append_column(UpperCAmelCase__ , pa.array([None] * len(UpperCAmelCase__) , type=UpperCAmelCase__))
# more expensive cast to support nested structures with keys in a different order
# allows str <-> int/float or str to Audio for example
A__ = table_cast(UpperCAmelCase__ , self.config.features.arrow_schema)
return pa_table
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , UpperCAmelCase__ : Tuple) ->str:
'''simple docstring'''
for file_idx, file in enumerate(itertools.chain.from_iterable(UpperCAmelCase__)):
# If the file is one json object and if we need to look at the list of items in one specific field
if self.config.field is not None:
with open(UpperCAmelCase__ , encoding=self.config.encoding , errors=self.config.encoding_errors) as f:
A__ = json.load(UpperCAmelCase__)
# We keep only the field we are interested in
A__ = dataset[self.config.field]
# We accept two format: a list of dicts or a dict of lists
if isinstance(UpperCAmelCase__ , (list, tuple)):
A__ = set().union(*[row.keys() for row in dataset])
A__ = {col: [row.get(UpperCAmelCase__) for row in dataset] for col in keys}
else:
A__ = dataset
A__ = pa.Table.from_pydict(UpperCAmelCase__)
yield file_idx, self._cast_table(UpperCAmelCase__)
# If the file has one json object per line
else:
with open(UpperCAmelCase__ , '''rb''') as f:
A__ = 0
# Use block_size equal to the chunk size divided by 32 to leverage multithreading
# Set a default minimum value of 16kB if the chunk size is really small
A__ = max(self.config.chunksize // 32 , 16 << 10)
A__ = (
self.config.encoding_errors if self.config.encoding_errors is not None else '''strict'''
)
while True:
A__ = f.read(self.config.chunksize)
if not batch:
break
# Finish current line
try:
batch += f.readline()
except (AttributeError, io.UnsupportedOperation):
batch += readline(UpperCAmelCase__)
# PyArrow only accepts utf-8 encoded bytes
if self.config.encoding != "utf-8":
A__ = batch.decode(self.config.encoding , errors=UpperCAmelCase__).encode('''utf-8''')
try:
while True:
try:
A__ = paj.read_json(
io.BytesIO(UpperCAmelCase__) , read_options=paj.ReadOptions(block_size=UpperCAmelCase__))
break
except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
if (
isinstance(UpperCAmelCase__ , pa.ArrowInvalid)
and "straddling" not in str(UpperCAmelCase__)
or block_size > len(UpperCAmelCase__)
):
raise
else:
# Increase the block size in case it was too small.
# The block size will be reset for the next file.
logger.debug(
f"""Batch of {len(UpperCAmelCase__)} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.""")
block_size *= 2
except pa.ArrowInvalid as e:
try:
with open(
UpperCAmelCase__ , encoding=self.config.encoding , errors=self.config.encoding_errors) as f:
A__ = json.load(UpperCAmelCase__)
except json.JSONDecodeError:
logger.error(f"""Failed to read file '{file}' with error {type(UpperCAmelCase__)}: {e}""")
raise e
# If possible, parse the file as a list of json objects and exit the loop
if isinstance(UpperCAmelCase__ , UpperCAmelCase__): # list is the only sequence type supported in JSON
try:
A__ = set().union(*[row.keys() for row in dataset])
A__ = {col: [row.get(UpperCAmelCase__) for row in dataset] for col in keys}
A__ = pa.Table.from_pydict(UpperCAmelCase__)
except (pa.ArrowInvalid, AttributeError) as e:
logger.error(f"""Failed to read file '{file}' with error {type(UpperCAmelCase__)}: {e}""")
raise ValueError(f"""Not able to read records in the JSON file at {file}.""") from None
yield file_idx, self._cast_table(UpperCAmelCase__)
break
else:
logger.error(f"""Failed to read file '{file}' with error {type(UpperCAmelCase__)}: {e}""")
raise ValueError(
f"""Not able to read records in the JSON file at {file}. """
f"""You should probably indicate the field of the JSON file containing your records. """
f"""This JSON file contain the following fields: {str(list(dataset.keys()))}. """
f"""Select the correct one and provide it as `field='XXX'` to the dataset loading method. """) from None
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(UpperCAmelCase__)
batch_idx += 1
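# Hedged usage sketch (not part of the original module): the builder above backs the
# "json" loading script in `datasets`, so JSON Lines files and single-object files with
# a records field can be loaded like this. The paths and field name are illustrative.
from datasets import load_dataset

ds_lines = load_dataset("json", data_files="data/train.jsonl")
ds_field = load_dataset("json", data_files="data/dump.json", field="data")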
| 14 | 1 |
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
_lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
@add_end_docstrings(UpperCAmelCase__ )
class UpperCamelCase_ ( UpperCAmelCase__ ):
'''simple docstring'''
def __init__( self : Optional[Any] , *UpperCAmelCase__ : List[str] , **UpperCAmelCase__ : Dict) ->Dict:
'''simple docstring'''
super().__init__(*UpperCAmelCase__ , **UpperCAmelCase__)
self.check_model_type(UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : List[str] , UpperCAmelCase__ : int=None , UpperCAmelCase__ : Optional[int]=None , UpperCAmelCase__ : int=None , **UpperCAmelCase__ : Any) ->str:
'''simple docstring'''
A__ , A__ = {}, {}
if padding is not None:
A__ = padding
if truncation is not None:
A__ = truncation
if top_k is not None:
A__ = top_k
return preprocess_params, {}, postprocess_params
def __call__( self : Dict , UpperCAmelCase__ : Union["Image.Image", str] , UpperCAmelCase__ : str = None , **UpperCAmelCase__ : List[Any]) ->List[Any]:
'''simple docstring'''
if isinstance(UpperCAmelCase__ , (Image.Image, str)) and isinstance(UpperCAmelCase__ , UpperCAmelCase__):
A__ = {'''image''': image, '''question''': question}
else:
A__ = image
A__ = super().__call__(UpperCAmelCase__ , **UpperCAmelCase__)
return results
def SCREAMING_SNAKE_CASE ( self : List[Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Union[str, Any]=False , UpperCAmelCase__ : Tuple=False) ->Optional[Any]:
'''simple docstring'''
A__ = load_image(inputs['''image'''])
A__ = self.tokenizer(
inputs['''question'''] , return_tensors=self.framework , padding=UpperCAmelCase__ , truncation=UpperCAmelCase__)
A__ = self.image_processor(images=UpperCAmelCase__ , return_tensors=self.framework)
model_inputs.update(UpperCAmelCase__)
return model_inputs
def SCREAMING_SNAKE_CASE ( self : List[Any] , UpperCAmelCase__ : List[Any]) ->str:
'''simple docstring'''
A__ = self.model(**UpperCAmelCase__)
return model_outputs
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Optional[int]=5) ->List[Any]:
'''simple docstring'''
if top_k > self.model.config.num_labels:
A__ = self.model.config.num_labels
if self.framework == "pt":
A__ = model_outputs.logits.sigmoid()[0]
A__ , A__ = probs.topk(UpperCAmelCase__)
else:
raise ValueError(f"""Unsupported framework: {self.framework}""")
A__ = scores.tolist()
A__ = ids.tolist()
return [{"score": score, "answer": self.model.config.idalabel[_id]} for score, _id in zip(UpperCAmelCase__ , UpperCAmelCase__)]
| 14 |
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
_lowerCamelCase : List[Any] = """sshleifer/bart-tiny-random"""
_lowerCamelCase : List[Any] = """patrickvonplaten/t5-tiny-random"""
@require_torch
class UpperCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->int:
'''simple docstring'''
return AutoConfig.from_pretrained(UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : List[str]) ->Any:
'''simple docstring'''
A__ , *A__ = create_student_by_copying_alternating_layers(UpperCAmelCase__ , tempfile.mkdtemp() , e=1 , d=1)
self.assertEqual(student.config.num_hidden_layers , 1)
def SCREAMING_SNAKE_CASE ( self : int) ->Any:
'''simple docstring'''
A__ , *A__ = create_student_by_copying_alternating_layers(UpperCAmelCase__ , tempfile.mkdtemp() , e=1 , d=UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Union[str, Any]:
'''simple docstring'''
A__ , *A__ = create_student_by_copying_alternating_layers(UpperCAmelCase__ , tempfile.mkdtemp() , e=1 , d=UpperCAmelCase__)
self.assertEqual(student.config.encoder_layers , 1)
self.assertEqual(student.config.decoder_layers , self.teacher_config.encoder_layers)
def SCREAMING_SNAKE_CASE ( self : Dict) ->int:
'''simple docstring'''
A__ , *A__ = create_student_by_copying_alternating_layers(UpperCAmelCase__ , tempfile.mkdtemp() , e=1 , d=1)
self.assertEqual(student.config.encoder_layers , 1)
self.assertEqual(student.config.decoder_layers , 1)
def SCREAMING_SNAKE_CASE ( self : str) ->List[Any]:
'''simple docstring'''
with self.assertRaises(UpperCAmelCase__):
create_student_by_copying_alternating_layers(UpperCAmelCase__ , tempfile.mkdtemp() , e=UpperCAmelCase__ , d=UpperCAmelCase__)
| 14 | 1 |
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
_lowerCamelCase : int = logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> Tuple[int, int]:
"""simple docstring"""
def constraint_to_multiple_of(lowercase_ , lowercase_ , lowercase_=0 , lowercase_=None ):
A__ = round(val / multiple ) * multiple
if max_val is not None and x > max_val:
A__ = math.floor(val / multiple ) * multiple
if x < min_val:
A__ = math.ceil(val / multiple ) * multiple
return x
A__ = (output_size, output_size) if isinstance(lowercase_ , lowercase_ ) else output_size
A__ , A__ = get_image_size(lowercase_ )
A__ , A__ = output_size
# determine new height and width
A__ = output_height / input_height
A__ = output_width / input_width
if keep_aspect_ratio:
# scale as little as possible
if abs(1 - scale_width ) < abs(1 - scale_height ):
# fit width
A__ = scale_width
else:
# fit height
A__ = scale_height
A__ = constraint_to_multiple_of(scale_height * input_height , multiple=lowercase_ )
A__ = constraint_to_multiple_of(scale_width * input_width , multiple=lowercase_ )
return (new_height, new_width)
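# Worked example (a sketch, not part of the original module): for a 480x640 input with
# output_size=(384, 384), keep_aspect_ratio=True and multiple=32, the height scale 0.8
# is kept (|1 - 0.8| < |1 - 0.6|), so the function returns (384, 512) after rounding
# both sides to multiples of 32.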
class UpperCamelCase_ ( UpperCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase__ = ['''pixel_values''']
def __init__( self : int , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : Dict[str, int] = None , UpperCAmelCase__ : PILImageResampling = PILImageResampling.BILINEAR , UpperCAmelCase__ : bool = False , UpperCAmelCase__ : int = 1 , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : Union[int, float] = 1 / 255 , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : Optional[Union[float, List[float]]] = None , UpperCAmelCase__ : Optional[Union[float, List[float]]] = None , **UpperCAmelCase__ : Optional[Any] , ) ->None:
'''simple docstring'''
super().__init__(**UpperCAmelCase__)
A__ = size if size is not None else {'''height''': 384, '''width''': 384}
A__ = get_size_dict(UpperCAmelCase__)
A__ = do_resize
A__ = size
A__ = keep_aspect_ratio
A__ = ensure_multiple_of
A__ = resample
A__ = do_rescale
A__ = rescale_factor
A__ = do_normalize
A__ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
A__ = image_std if image_std is not None else IMAGENET_STANDARD_STD
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : Dict[str, int] , UpperCAmelCase__ : bool = False , UpperCAmelCase__ : int = 1 , UpperCAmelCase__ : PILImageResampling = PILImageResampling.BICUBIC , UpperCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase__ : int , ) ->np.ndarray:
'''simple docstring'''
A__ = get_size_dict(UpperCAmelCase__)
if "height" not in size or "width" not in size:
raise ValueError(f"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""")
A__ = get_resize_output_image_size(
UpperCAmelCase__ , output_size=(size['''height'''], size['''width''']) , keep_aspect_ratio=UpperCAmelCase__ , multiple=UpperCAmelCase__ , )
return resize(UpperCAmelCase__ , size=UpperCAmelCase__ , resample=UpperCAmelCase__ , data_format=UpperCAmelCase__ , **UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : Dict , UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : Union[int, float] , UpperCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase__ : Optional[Any] , ) ->Dict:
'''simple docstring'''
return rescale(UpperCAmelCase__ , scale=UpperCAmelCase__ , data_format=UpperCAmelCase__ , **UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : Union[float, List[float]] , UpperCAmelCase__ : Union[float, List[float]] , UpperCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase__ : List[str] , ) ->np.ndarray:
'''simple docstring'''
return normalize(UpperCAmelCase__ , mean=UpperCAmelCase__ , std=UpperCAmelCase__ , data_format=UpperCAmelCase__ , **UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , UpperCAmelCase__ : ImageInput , UpperCAmelCase__ : bool = None , UpperCAmelCase__ : int = None , UpperCAmelCase__ : bool = None , UpperCAmelCase__ : int = None , UpperCAmelCase__ : PILImageResampling = None , UpperCAmelCase__ : bool = None , UpperCAmelCase__ : float = None , UpperCAmelCase__ : bool = None , UpperCAmelCase__ : Optional[Union[float, List[float]]] = None , UpperCAmelCase__ : Optional[Union[float, List[float]]] = None , UpperCAmelCase__ : Optional[Union[str, TensorType]] = None , UpperCAmelCase__ : ChannelDimension = ChannelDimension.FIRST , **UpperCAmelCase__ : Any , ) ->PIL.Image.Image:
'''simple docstring'''
A__ = do_resize if do_resize is not None else self.do_resize
A__ = size if size is not None else self.size
A__ = get_size_dict(UpperCAmelCase__)
A__ = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
A__ = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
A__ = resample if resample is not None else self.resample
A__ = do_rescale if do_rescale is not None else self.do_rescale
A__ = rescale_factor if rescale_factor is not None else self.rescale_factor
A__ = do_normalize if do_normalize is not None else self.do_normalize
A__ = image_mean if image_mean is not None else self.image_mean
A__ = image_std if image_std is not None else self.image_std
A__ = make_list_of_images(UpperCAmelCase__)
if not valid_images(UpperCAmelCase__):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''')
        if do_resize and (size is None or resample is None):
raise ValueError('''Size and resample must be specified if do_resize is True.''')
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''')
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''')
# All transformations expect numpy arrays.
A__ = [to_numpy_array(UpperCAmelCase__) for image in images]
if do_resize:
A__ = [self.resize(image=UpperCAmelCase__ , size=UpperCAmelCase__ , resample=UpperCAmelCase__) for image in images]
if do_rescale:
A__ = [self.rescale(image=UpperCAmelCase__ , scale=UpperCAmelCase__) for image in images]
if do_normalize:
A__ = [self.normalize(image=UpperCAmelCase__ , mean=UpperCAmelCase__ , std=UpperCAmelCase__) for image in images]
A__ = [to_channel_dimension_format(UpperCAmelCase__ , UpperCAmelCase__) for image in images]
A__ = {'''pixel_values''': images}
return BatchFeature(data=UpperCAmelCase__ , tensor_type=UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : Tuple , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : List[Tuple] = None) ->Tuple:
'''simple docstring'''
A__ = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(UpperCAmelCase__) != len(UpperCAmelCase__):
raise ValueError(
'''Make sure that you pass in as many target sizes as the batch dimension of the logits''')
if is_torch_tensor(UpperCAmelCase__):
A__ = target_sizes.numpy()
A__ = []
for idx in range(len(UpperCAmelCase__)):
A__ = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=UpperCAmelCase__)
A__ = resized_logits[0].argmax(dim=0)
semantic_segmentation.append(UpperCAmelCase__)
else:
A__ = logits.argmax(dim=1)
A__ = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
return semantic_segmentation
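# Hedged usage sketch (not part of the original module): preprocessing an image and
# turning segmentation logits back into a label map with the post-processing method
# defined above. The checkpoint and image URL are illustrative.
import requests
import torch
from PIL import Image
from transformers import DPTForSemanticSegmentation, DPTImageProcessor

processor = DPTImageProcessor.from_pretrained("Intel/dpt-large-ade")
model = DPTForSemanticSegmentation.from_pretrained("Intel/dpt-large-ade")
image = Image.open(
    requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw
)
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)
# PIL size is (width, height); target_sizes expects (height, width)
seg_map = processor.post_process_semantic_segmentation(outputs, target_sizes=[image.size[::-1]])[0]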
| 14 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DeformableDetrImageProcessor
class UpperCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : List[str] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : int=7 , UpperCAmelCase__ : Dict=3 , UpperCAmelCase__ : List[Any]=30 , UpperCAmelCase__ : Any=400 , UpperCAmelCase__ : Optional[Any]=True , UpperCAmelCase__ : List[str]=None , UpperCAmelCase__ : Any=True , UpperCAmelCase__ : Optional[Any]=[0.5, 0.5, 0.5] , UpperCAmelCase__ : Any=[0.5, 0.5, 0.5] , UpperCAmelCase__ : List[str]=True , UpperCAmelCase__ : Optional[int]=1 / 255 , UpperCAmelCase__ : Optional[Any]=True , ) ->str:
'''simple docstring'''
A__ = size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 1_333}
A__ = parent
A__ = batch_size
A__ = num_channels
A__ = min_resolution
A__ = max_resolution
A__ = do_resize
A__ = size
A__ = do_normalize
A__ = image_mean
A__ = image_std
A__ = do_rescale
A__ = rescale_factor
A__ = do_pad
def SCREAMING_SNAKE_CASE ( self : Any) ->List[str]:
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : int=False) ->Optional[Any]:
'''simple docstring'''
if not batched:
A__ = image_inputs[0]
if isinstance(UpperCAmelCase__ , Image.Image):
A__ , A__ = image.size
else:
A__ , A__ = image.shape[1], image.shape[2]
if w < h:
A__ = int(self.size['''shortest_edge'''] * h / w)
A__ = self.size['''shortest_edge''']
elif w > h:
A__ = self.size['''shortest_edge''']
A__ = int(self.size['''shortest_edge'''] * w / h)
else:
A__ = self.size['''shortest_edge''']
A__ = self.size['''shortest_edge''']
else:
A__ = []
for image in image_inputs:
A__ , A__ = self.get_expected_values([image])
expected_values.append((expected_height, expected_width))
A__ = max(UpperCAmelCase__ , key=lambda UpperCAmelCase__: item[0])[0]
A__ = max(UpperCAmelCase__ , key=lambda UpperCAmelCase__: item[1])[1]
return expected_height, expected_width
@require_torch
@require_vision
class UpperCamelCase_ ( UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase__ = DeformableDetrImageProcessor if is_vision_available() else None
def SCREAMING_SNAKE_CASE ( self : List[str]) ->Tuple:
'''simple docstring'''
A__ = DeformableDetrImageProcessingTester(self)
@property
def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Any:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def SCREAMING_SNAKE_CASE ( self : List[str]) ->List[str]:
'''simple docstring'''
A__ = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(UpperCAmelCase__ , '''image_mean'''))
self.assertTrue(hasattr(UpperCAmelCase__ , '''image_std'''))
self.assertTrue(hasattr(UpperCAmelCase__ , '''do_normalize'''))
self.assertTrue(hasattr(UpperCAmelCase__ , '''do_resize'''))
self.assertTrue(hasattr(UpperCAmelCase__ , '''do_rescale'''))
self.assertTrue(hasattr(UpperCAmelCase__ , '''do_pad'''))
self.assertTrue(hasattr(UpperCAmelCase__ , '''size'''))
def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->int:
'''simple docstring'''
A__ = self.image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 1_333})
self.assertEqual(image_processor.do_pad , UpperCAmelCase__)
A__ = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=UpperCAmelCase__)
self.assertEqual(image_processor.size , {'''shortest_edge''': 42, '''longest_edge''': 84})
self.assertEqual(image_processor.do_pad , UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : Any) ->List[str]:
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Dict:
'''simple docstring'''
A__ = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__)
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase__ , Image.Image)
# Test not batched input
A__ = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values
A__ , A__ = self.image_processor_tester.get_expected_values(UpperCAmelCase__)
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A__ , A__ = self.image_processor_tester.get_expected_values(UpperCAmelCase__ , batched=UpperCAmelCase__)
A__ = image_processing(UpperCAmelCase__ , return_tensors='''pt''').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def SCREAMING_SNAKE_CASE ( self : int) ->Optional[int]:
'''simple docstring'''
A__ = self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ , numpify=UpperCAmelCase__)
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase__ , np.ndarray)
# Test not batched input
A__ = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values
A__ , A__ = self.image_processor_tester.get_expected_values(UpperCAmelCase__)
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A__ = image_processing(UpperCAmelCase__ , return_tensors='''pt''').pixel_values
A__ , A__ = self.image_processor_tester.get_expected_values(UpperCAmelCase__ , batched=UpperCAmelCase__)
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def SCREAMING_SNAKE_CASE ( self : int) ->Tuple:
'''simple docstring'''
A__ = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ , torchify=UpperCAmelCase__)
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase__ , torch.Tensor)
# Test not batched input
A__ = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values
A__ , A__ = self.image_processor_tester.get_expected_values(UpperCAmelCase__)
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A__ = image_processing(UpperCAmelCase__ , return_tensors='''pt''').pixel_values
A__ , A__ = self.image_processor_tester.get_expected_values(UpperCAmelCase__ , batched=UpperCAmelCase__)
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->List[str]:
'''simple docstring'''
A__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''') as f:
A__ = json.loads(f.read())
A__ = {'''image_id''': 39_769, '''annotations''': target}
# encode them
A__ = DeformableDetrImageProcessor()
A__ = image_processing(images=UpperCAmelCase__ , annotations=UpperCAmelCase__ , return_tensors='''pt''')
# verify pixel values
A__ = torch.Size([1, 3, 800, 1_066])
self.assertEqual(encoding['''pixel_values'''].shape , UpperCAmelCase__)
A__ = torch.tensor([0.2796, 0.3138, 0.3481])
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , UpperCAmelCase__ , atol=1e-4))
# verify area
A__ = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , UpperCAmelCase__))
# verify boxes
A__ = torch.Size([6, 4])
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , UpperCAmelCase__)
A__ = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , UpperCAmelCase__ , atol=1e-3))
# verify image_id
A__ = torch.tensor([39_769])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , UpperCAmelCase__))
# verify is_crowd
A__ = torch.tensor([0, 0, 0, 0, 0, 0])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , UpperCAmelCase__))
# verify class_labels
A__ = torch.tensor([75, 75, 63, 65, 17, 17])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , UpperCAmelCase__))
# verify orig_size
A__ = torch.tensor([480, 640])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , UpperCAmelCase__))
# verify size
A__ = torch.tensor([800, 1_066])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , UpperCAmelCase__))
@slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39_769, "segments_info": target}
        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = DeformableDetrImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1_066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39_769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822_873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1_066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
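# Editor's note (not from the original test file): in both checks above the
# `boxes` tensor holds normalized (center_x, center_y, width, height)
# coordinates in [0, 1]; multiplying by (W, H, W, H) of the resized image —
# here (1066, 800, 1066, 800) — recovers pixel coordinates.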
| 14 | 1 |
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
logger = datasets.utils.logging.get_logger(__name__)


@dataclass
class JsonConfig(datasets.BuilderConfig):
    """BuilderConfig for JSON."""

    features: Optional[datasets.Features] = None
    encoding: str = "utf-8"
    encoding_errors: Optional[str] = None
    field: Optional[str] = None
    use_threads: bool = True  # deprecated
    block_size: Optional[int] = None  # deprecated
    chunksize: int = 10 << 20  # 10MB
    newlines_in_values: Optional[bool] = None


class Json(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = JsonConfig

    def _info(self):
        if self.config.block_size is not None:
            logger.warning("The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead")
            self.config.chunksize = self.config.block_size
        if self.config.use_threads is not True:
            logger.warning(
                "The JSON loader parameter `use_threads` is deprecated and doesn't have any effect anymore.")
        if self.config.newlines_in_values is not None:
            raise ValueError("The JSON loader parameter `newlines_in_values` is no longer supported")
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        if not self.config.data_files:
            raise ValueError(f"""At least one data file must be specified, but got data_files={self.config.data_files}""")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            # adding missing columns
            for column_name in set(self.config.features) - set(pa_table.column_names):
                type = self.config.features.arrow_schema.field(column_name).type
                pa_table = pa_table.append_column(column_name, pa.array([None] * len(pa_table), type=type))
            # more expensive cast to support nested structures with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            # If the file is one json object and if we need to look at the list of items in one specific field
            if self.config.field is not None:
                with open(file, encoding=self.config.encoding, errors=self.config.encoding_errors) as f:
                    dataset = json.load(f)
                # We keep only the field we are interested in
                dataset = dataset[self.config.field]
                # We accept two format: a list of dicts or a dict of lists
                if isinstance(dataset, (list, tuple)):
                    keys = set().union(*[row.keys() for row in dataset])
                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                else:
                    mapping = dataset
                pa_table = pa.Table.from_pydict(mapping)
                yield file_idx, self._cast_table(pa_table)
            # If the file has one json object per line
            else:
                with open(file, "rb") as f:
                    batch_idx = 0
                    # Use block_size equal to the chunk size divided by 32 to leverage multithreading
                    # Set a default minimum value of 16kB if the chunk size is really small
                    block_size = max(self.config.chunksize // 32, 16 << 10)
                    encoding_errors = (
                        self.config.encoding_errors if self.config.encoding_errors is not None else "strict"
                    )
                    while True:
                        batch = f.read(self.config.chunksize)
                        if not batch:
                            break
                        # Finish current line
                        try:
                            batch += f.readline()
                        except (AttributeError, io.UnsupportedOperation):
                            batch += readline(f)
                        # PyArrow only accepts utf-8 encoded bytes
                        if self.config.encoding != "utf-8":
                            batch = batch.decode(self.config.encoding, errors=encoding_errors).encode("utf-8")
                        try:
                            while True:
                                try:
                                    pa_table = paj.read_json(
                                        io.BytesIO(batch), read_options=paj.ReadOptions(block_size=block_size))
                                    break
                                except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
                                    if (
                                        isinstance(e, pa.ArrowInvalid)
                                        and "straddling" not in str(e)
                                        or block_size > len(batch)
                                    ):
                                        raise
                                    else:
                                        # Increase the block size in case it was too small.
                                        # The block size will be reset for the next file.
                                        logger.debug(
                                            f"""Batch of {len(batch)} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.""")
                                        block_size *= 2
                        except pa.ArrowInvalid as e:
                            try:
                                with open(
                                    file, encoding=self.config.encoding, errors=self.config.encoding_errors) as f:
                                    dataset = json.load(f)
                            except json.JSONDecodeError:
                                logger.error(f"""Failed to read file '{file}' with error {type(e)}: {e}""")
                                raise e
                            # If possible, parse the file as a list of json objects and exit the loop
                            if isinstance(dataset, list):  # list is the only sequence type supported in JSON
                                try:
                                    keys = set().union(*[row.keys() for row in dataset])
                                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                                    pa_table = pa.Table.from_pydict(mapping)
                                except (pa.ArrowInvalid, AttributeError) as e:
                                    logger.error(f"""Failed to read file '{file}' with error {type(e)}: {e}""")
                                    raise ValueError(f"""Not able to read records in the JSON file at {file}.""") from None
                                yield file_idx, self._cast_table(pa_table)
                                break
                            else:
                                logger.error(f"""Failed to read file '{file}' with error {type(e)}: {e}""")
                                raise ValueError(
                                    f"""Not able to read records in the JSON file at {file}. """
                                    f"""You should probably indicate the field of the JSON file containing your records. """
                                    f"""This JSON file contain the following fields: {str(list(dataset.keys()))}. """
                                    f"""Select the correct one and provide it as `field='XXX'` to the dataset loading method. """) from None
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield (file_idx, batch_idx), self._cast_table(pa_table)
                        batch_idx += 1
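# Editor's sketch of the retry logic used in `_generate_tables` above (the
# helper name `parse_json_bytes` is illustrative, not part of the datasets
# API): pyarrow's JSON reader raises ArrowInvalid when one JSON document
# "straddles" a block boundary, i.e. is larger than `block_size`, so the
# loader doubles the block size and tries again.
def parse_json_bytes(batch: bytes, block_size: int = 16 << 10) -> pa.Table:
    while True:
        try:
            return paj.read_json(io.BytesIO(batch), read_options=paj.ReadOptions(block_size=block_size))
        except pa.ArrowInvalid:
            if block_size > len(batch):  # even a block covering the whole batch failed
                raise
            block_size *= 2  # block too small to hold a full JSON document; retry bigger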
| 14 |
from __future__ import annotations

import typing
from collections.abc import Iterable

import numpy as np

Vector = typing.Union[Iterable[float], Iterable[int], np.ndarray]  # noqa: UP007
VectorOut = typing.Union[np.float64, int, float]  # noqa: UP007


def euclidean_distance(vector_1: Vector, vector_2: Vector) -> VectorOut:
    """
    Calculate the distance between the two endpoints of two vectors.
    >>> euclidean_distance([0, 0], [3, 4])
    5.0
    """
    return np.sqrt(np.sum((np.asarray(vector_1) - np.asarray(vector_2)) ** 2))


def euclidean_distance_no_np(vector_1: Vector, vector_2: Vector) -> VectorOut:
    """
    Calculate the distance between the two endpoints of two vectors without numpy.
    >>> euclidean_distance_no_np([0, 0], [3, 4])
    5.0
    """
    return sum((v1 - v2) ** 2 for v1, v2 in zip(vector_1, vector_2)) ** (1 / 2)


if __name__ == "__main__":

    def benchmark() -> None:
        """Benchmark both implementations."""
        from timeit import timeit

        print("Without Numpy")
        print(
            timeit(
                "euclidean_distance_no_np([1, 2, 3], [4, 5, 6])", number=10_000, globals=globals()))
        print("With Numpy")
        print(
            timeit(
                "euclidean_distance([1, 2, 3], [4, 5, 6])", number=10_000, globals=globals()))

    benchmark()
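    # Editor's check (not in the original): both implementations agree on the
    # classic 3-4-5 right triangle.
    assert float(euclidean_distance([0, 0], [3, 4])) == 5.0
    assert euclidean_distance_no_np([0, 0], [3, 4]) == 5.0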
| 14 | 1 |
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
_CITATION = """\
@inproceedings{pillutla-etal:mauve:neurips2021,
title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},
author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},
booktitle = {NeurIPS},
year = {2021}
}
"""
_DESCRIPTION = """\
MAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.
MAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.
For details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).
This metrics is a wrapper around the official implementation of MAUVE:
https://github.com/krishnap25/mauve
"""
_KWARGS_DESCRIPTION = """
Calculates MAUVE scores between two lists of generated text and reference text.
Args:
predictions: list of generated text to score. Each predictions
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
Optional Args:
num_buckets: the size of the histogram to quantize P and Q. Options: 'auto' (default) or an integer
pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1
kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9
kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5
kmeans_max_iter: maximum number of k-means iterations. Default 500
featurize_model_name: name of the model from which features are obtained. Default 'gpt2-large' Use one of ['gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl'].
device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU
max_text_length: maximum number of tokens to consider. Default 1024
divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25
mauve_scaling_factor: \"c\" from the paper. Default 5.
verbose: If True (default), print running time updates
seed: random seed to initialize k-means cluster assignments.
Returns:
mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,
frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,
divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,
p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,
q_hist: same as above, but with q_text.
Examples:
>>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest
>>> import datasets
>>> mauve = datasets.load_metric('mauve')
>>> predictions = [\"hello there\", \"general kenobi\"]
>>> references = [\"hello there\", \"general kenobi\"]
>>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP
>>> print(out.mauve) # doctest: +SKIP
1.0
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Mauve(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION, citation=_CITATION, homepage="https://github.com/krishnap25/mauve", inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }), codebase_urls=["https://github.com/krishnap25/mauve"], reference_urls=[
                "https://arxiv.org/abs/2102.01454",
                "https://github.com/krishnap25/mauve",
            ])

    def _compute(self, predictions, references, p_features=None, q_features=None, p_tokens=None, q_tokens=None, num_buckets="auto", pca_max_data=-1, kmeans_explained_var=0.9, kmeans_num_redo=5, kmeans_max_iter=500, featurize_model_name="gpt2-large", device_id=-1, max_text_length=1_024, divergence_curve_discretization_size=25, mauve_scaling_factor=5, verbose=True, seed=25):
        out = compute_mauve(
            p_text=predictions, q_text=references, p_features=p_features, q_features=q_features, p_tokens=p_tokens, q_tokens=q_tokens, num_buckets=num_buckets, pca_max_data=pca_max_data, kmeans_explained_var=kmeans_explained_var, kmeans_num_redo=kmeans_num_redo, kmeans_max_iter=kmeans_max_iter, featurize_model_name=featurize_model_name, device_id=device_id, max_text_length=max_text_length, divergence_curve_discretization_size=divergence_curve_discretization_size, mauve_scaling_factor=mauve_scaling_factor, verbose=verbose, seed=seed)
        return out
| 14 |
from ...processing_utils import ProcessorMixin
class SpeechT5Processor(ProcessorMixin):
    r"""
    Constructs a SpeechT5 processor which wraps a feature extractor and a tokenizer into a single processor.
    """

    feature_extractor_class = "SpeechT5FeatureExtractor"
    tokenizer_class = "SpeechT5Tokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)

    def __call__(self, *args, **kwargs):
        """Processes audio and text inputs, as well as audio and text targets."""
        audio = kwargs.pop("audio", None)
        text = kwargs.pop("text", None)
        text_target = kwargs.pop("text_target", None)
        audio_target = kwargs.pop("audio_target", None)
        sampling_rate = kwargs.pop("sampling_rate", None)

        if audio is not None and text is not None:
            raise ValueError(
                "Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?")
        if audio_target is not None and text_target is not None:
            raise ValueError(
                "Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?")
        if audio is None and audio_target is None and text is None and text_target is None:
            raise ValueError(
                "You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        elif text is not None:
            inputs = self.tokenizer(text, **kwargs)
        else:
            inputs = None

        if audio_target is not None:
            targets = self.feature_extractor(audio_target=audio_target, *args, sampling_rate=sampling_rate, **kwargs)
            labels = targets["input_values"]
        elif text_target is not None:
            targets = self.tokenizer(text_target, **kwargs)
            labels = targets["input_ids"]
        else:
            targets = None

        if inputs is None:
            return targets

        if targets is not None:
            inputs["labels"] = labels

            decoder_attention_mask = targets.get("attention_mask")
            if decoder_attention_mask is not None:
                inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs

    def pad(self, *args, **kwargs):
        """Collates audio and text inputs, as well as their targets, into a padded batch."""
        input_values = kwargs.pop("input_values", None)
        input_ids = kwargs.pop("input_ids", None)
        labels = kwargs.pop("labels", None)

        if input_values is not None and input_ids is not None:
            raise ValueError("Cannot process both `input_values` and `input_ids` inputs.")
        if input_values is None and input_ids is None and labels is None:
            raise ValueError(
                "You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded.")

        if input_values is not None:
            inputs = self.feature_extractor.pad(input_values, *args, **kwargs)
        elif input_ids is not None:
            inputs = self.tokenizer.pad(input_ids, **kwargs)
        else:
            inputs = None

        if labels is not None:
            if "input_ids" in labels or (isinstance(labels, list) and "input_ids" in labels[0]):
                targets = self.tokenizer.pad(labels, **kwargs)
                labels = targets["input_ids"]
            else:
                # temporarily pad mel targets by pretending the feature size equals num_mel_bins
                feature_size_hack = self.feature_extractor.feature_size
                self.feature_extractor.feature_size = self.feature_extractor.num_mel_bins
                targets = self.feature_extractor.pad(labels, *args, **kwargs)
                self.feature_extractor.feature_size = feature_size_hack
                labels = targets["input_values"]
        else:
            targets = None

        if inputs is None:
            return targets

        if targets is not None:
            inputs["labels"] = labels

            decoder_attention_mask = targets.get("attention_mask")
            if decoder_attention_mask is not None:
                inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs

    def batch_decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)
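# Editor's sketch (illustrative, not part of transformers): how the routing in
# __call__ plays out for a TTS-style batch. The checkpoint name is one possible
# choice, and `waveform` is assumed to be a 1-D float array at 16 kHz.
#
#   processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
#   batch = processor(text="hello world", audio_target=waveform,
#                     sampling_rate=16_000, return_tensors="pt")
#   # -> "input_ids" from the tokenizer, "labels" (mel targets) from the
#   #    feature extractor, plus "decoder_attention_mask" when targets are padded.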
| 14 | 1 |
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR = get_tests_dir("fixtures")
SAMPLE_FEATURE_EXTRACTION_CONFIG = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
SAMPLE_CONFIG = get_tests_dir("fixtures/dummy-config.json")
class AutoFeatureExtractorTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_feature_extractor_from_model_shortcut(self):
        config = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_feature_extractor_from_local_directory_from_key(self):
        config = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_feature_extractor_from_local_directory_from_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = Wav2Vec2Config()

            # remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
            config_dict = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR).to_dict()
            config_dict.pop("feature_extractor_type")
            config = Wav2Vec2FeatureExtractor(**config_dict)

            # save in new folder
            model_config.save_pretrained(tmpdirname)
            config.save_pretrained(tmpdirname)

            config = AutoFeatureExtractor.from_pretrained(tmpdirname)

            # make sure private variable is not incorrectly saved
            dict_as_saved = json.loads(config.to_json_string())
            self.assertTrue("_processor_class" not in dict_as_saved)

        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_feature_extractor_from_local_file(self):
        config = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG)
        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"):
            _ = AutoFeatureExtractor.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, R"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"):
            _ = AutoFeatureExtractor.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_feature_extractor_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.", ):
            _ = AutoFeatureExtractor.from_pretrained("hf-internal-testing/config-no-model")

    def test_from_pretrained_dynamic_feature_extractor(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            _ = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            _ = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=False)

        feature_extractor = AutoFeatureExtractor.from_pretrained(
            "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=True)
        self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")

        # Test feature extractor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(tmp_dir)
            reloaded_feature_extractor = AutoFeatureExtractor.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_feature_extractor.__class__.__name__, "NewFeatureExtractor")

    def test_new_feature_extractor_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoFeatureExtractor.register(Wav2Vec2Config, Wav2Vec2FeatureExtractor)

            # Now that the config is registered, it can be used as any other config with the auto-API
            feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
            with tempfile.TemporaryDirectory() as tmp_dir:
                feature_extractor.save_pretrained(tmp_dir)
                new_feature_extractor = AutoFeatureExtractor.from_pretrained(tmp_dir)
                self.assertIsInstance(new_feature_extractor, CustomFeatureExtractor)

        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]

    def test_from_pretrained_dynamic_feature_extractor_conflict(self):
        class NewFeatureExtractor(Wav2Vec2FeatureExtractor):
            is_local = True

        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, NewFeatureExtractor)
            # If remote code is not set, the default is to use local
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor")
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(feature_extractor.is_local)

            # If remote code is disabled, we load the local one.
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=False)
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(feature_extractor.is_local)

            # If remote is enabled, we load from the Hub
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=True)
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(not hasattr(feature_extractor, "is_local"))
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
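# Editor's note (illustrative names): the registration pattern exercised above
# is the supported way to hook a custom config/feature-extractor pair into the
# auto classes:
#
#   AutoConfig.register("my-model", MyConfig)            # MyConfig.model_type == "my-model"
#   AutoFeatureExtractor.register(MyConfig, MyFeatureExtractor)
#
# after which AutoFeatureExtractor.from_pretrained(...) resolves to
# MyFeatureExtractor for any config whose model_type is "my-model".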
| 14 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/git-base": "https://huggingface.co/microsoft/git-base/resolve/main/config.json",
}


class GitVisionConfig(PretrainedConfig):
    model_type = "git_vision_model"

    def __init__(self, hidden_size=768, intermediate_size=3_072, num_hidden_layers=12, num_attention_heads=12, num_channels=3, image_size=224, patch_size=16, hidden_act="quick_gelu", layer_norm_eps=1e-5, attention_dropout=0.0, initializer_range=0.02, **kwargs):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from GITConfig
        if config_dict.get("model_type") == "git":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors.")

        return cls.from_dict(config_dict, **kwargs)


class GitConfig(PretrainedConfig):
    model_type = "git"

    def __init__(self, vision_config=None, vocab_size=30_522, hidden_size=768, num_hidden_layers=6, num_attention_heads=12, intermediate_size=3_072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=1_024, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type="absolute", use_cache=True, tie_word_embeddings=False, bos_token_id=101, eos_token_id=102, num_image_with_embedding=None, **kwargs):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, pad_token_id=pad_token_id, **kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the GitVisionConfig with default values.")

        self.vision_config = GitVisionConfig(**vision_config)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.num_image_with_embedding = num_image_with_embedding

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
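# Editor's example (a minimal sketch using only the classes defined above): the
# vision tower is passed as a plain dict and materialized as a GitVisionConfig
# on construction.
#
#   config = GitConfig(vision_config={"hidden_size": 512, "num_hidden_layers": 6})
#   config.vision_config.hidden_size   # 512
#   config.to_dict()["model_type"]     # "git"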
| 14 | 1 |
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from packaging import version
from .. import __version__
from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD
from .doc import (
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
copy_func,
replace_return_docstrings,
)
from .generic import (
ContextManagers,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
TensorType,
add_model_info_to_auto_map,
cached_property,
can_return_loss,
expand_dims,
find_labels,
flatten_dict,
infer_framework,
is_jax_tensor,
is_numpy_array,
is_tensor,
is_tf_symbolic_tensor,
is_tf_tensor,
is_torch_device,
is_torch_dtype,
is_torch_tensor,
reshape,
squeeze,
strtobool,
tensor_size,
to_numpy,
to_py_obj,
transpose,
working_or_temp_dir,
)
from .hub import (
CLOUDFRONT_DISTRIB_PREFIX,
DISABLE_TELEMETRY,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
EntryNotFoundError,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
cached_file,
default_cache_path,
define_sagemaker_information,
download_url,
extract_commit_hash,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
has_file,
http_user_agent,
is_offline_mode,
is_remote_url,
move_cache,
send_example_telemetry,
try_to_load_from_cache,
)
from .import_utils import (
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
TORCH_FX_REQUIRED_VERSION,
USE_JAX,
USE_TF,
USE_TORCH,
DummyObject,
OptionalDependencyNotAvailable,
_LazyModule,
ccl_version,
direct_transformers_import,
get_torch_version,
is_accelerate_available,
is_apex_available,
is_bitsandbytes_available,
    is_bs4_available,
is_coloredlogs_available,
is_cython_available,
is_datasets_available,
is_decord_available,
    is_detectron2_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_jieba_available,
is_jumanpp_available,
is_kenlm_available,
is_keras_nlp_available,
is_librosa_available,
is_natten_available,
is_ninja_available,
is_onnx_available,
is_openai_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
    is_py3nvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_sudachi_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
    is_tf2onnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
    is_torch_bf16_available,
    is_torch_bf16_cpu_available,
    is_torch_bf16_gpu_available,
is_torch_compile_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_neuroncore_available,
is_torch_tensorrt_fx_available,
    is_torch_tf32_available,
is_torch_tpu_available,
is_torchaudio_available,
is_torchdistx_available,
is_torchdynamo_available,
is_torchvision_available,
is_training_run_on_sagemaker,
is_vision_available,
requires_backends,
torch_only_method,
)
WEIGHTS_NAME = "pytorch_model.bin"
WEIGHTS_INDEX_NAME = "pytorch_model.bin.index.json"
ADAPTER_CONFIG_NAME = "adapter_config.json"
ADAPTER_WEIGHTS_NAME = "adapter_model.bin"
ADAPTER_SAFE_WEIGHTS_NAME = "adapter_model.safetensors"
TF2_WEIGHTS_NAME = "tf_model.h5"
TF2_WEIGHTS_INDEX_NAME = "tf_model.h5.index.json"
TF_WEIGHTS_NAME = "model.ckpt"
FLAX_WEIGHTS_NAME = "flax_model.msgpack"
FLAX_WEIGHTS_INDEX_NAME = "flax_model.msgpack.index.json"
SAFE_WEIGHTS_NAME = "model.safetensors"
SAFE_WEIGHTS_INDEX_NAME = "model.safetensors.index.json"
CONFIG_NAME = "config.json"
FEATURE_EXTRACTOR_NAME = "preprocessor_config.json"
IMAGE_PROCESSOR_NAME = FEATURE_EXTRACTOR_NAME
GENERATION_CONFIG_NAME = "generation_config.json"
MODEL_CARD_NAME = "modelcard.json"

SENTENCEPIECE_UNDERLINE = "▁"
SPIECE_UNDERLINE = SENTENCEPIECE_UNDERLINE  # Kept for backward compatibility

MULTIPLE_CHOICE_DUMMY_INPUTS = [
    [[0, 1, 0, 1], [1, 0, 0, 1]]
] * 2  # Needs to have 0s and 1s only since XLM uses it for langs too.
DUMMY_INPUTS = [[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]
DUMMY_MASK = [[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]]


def check_min_version(min_version):
    if version.parse(__version__) < version.parse(min_version):
        if "dev" in min_version:
            error_message = (
                "This example requires a source install from HuggingFace Transformers (see "
                "`https://huggingface.co/docs/transformers/installation#install-from-source`),"
            )
        else:
            error_message = f"This example requires a minimum version of {min_version},"
        error_message += f" but the version found is {__version__}.\n"
        raise ImportError(
            error_message
            + "Check out https://github.com/huggingface/transformers/tree/main/examples#important-note for the examples corresponding to other "
            "versions of HuggingFace Transformers.")
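# Editor's note: the comparison in check_min_version relies on packaging's
# PEP 440 ordering, in which dev releases sort before the final release, e.g.
#   version.parse("4.21.0.dev0") < version.parse("4.21.0")  # True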
| 14 |
import requests
from bs4 import BeautifulSoup


def get_citation(base_url: str, params: dict) -> str:
    """Return the citation count reported by Google Scholar for a paper."""
    soup = BeautifulSoup(requests.get(base_url, params=params).content, "html.parser")
    div = soup.find("div", attrs={"class": "gs_ri"})
    anchors = div.find("div", attrs={"class": "gs_fl"}).find_all("a")
    return anchors[2].get_text()
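# Editor's note: the selectors above are coupled to Google Scholar's markup —
# `div.gs_ri` is a result entry, `div.gs_fl` its footer links, and the third
# anchor is the "Cited by N" link — so any layout change breaks this scraper,
# and automated access may be rate-limited or blocked.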
if __name__ == "__main__":
    params = {
"""title""": (
"""Precisely geometry controlled microsupercapacitors for ultrahigh areal """
"""capacitance, volumetric capacitance, and energy density"""
),
"""journal""": """Chem. Mater.""",
"""volume""": 30,
"""pages""": """3979-3990""",
"""year""": 2018,
"""hl""": """en""",
}
print(get_citation("""https://scholar.google.com/scholar_lookup""", params=params))
| 14 | 1 |
import math

BALLS_PER_COLOUR = 10
NUM_COLOURS = 7
NUM_BALLS = BALLS_PER_COLOUR * NUM_COLOURS


def solution(num_picks: int = 20) -> str:
    """Return the expected number of distinct colours among `num_picks` balls."""
    total = math.comb(NUM_BALLS, num_picks)
    missing_colour = math.comb(NUM_BALLS - BALLS_PER_COLOUR, num_picks)

    result = NUM_COLOURS * (1 - missing_colour / total)

    return f"{result:.9f}"


if __name__ == "__main__":
    print(solution(20))
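# Editor's check (not in the original): by linearity of expectation the answer
# is NUM_COLOURS * (1 - C(60, 20) / C(70, 20)), and the published result for
# Project Euler problem 493 is:
#   >>> solution(20)
#   '6.818741802'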
| 14 |
import argparse
import torch
from safetensors.torch import load_file
from diffusers import StableDiffusionPipeline
def convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha):
    # load base model
    pipeline = StableDiffusionPipeline.from_pretrained(base_model_path, torch_dtype=torch.float32)

    # load LoRA weight from .safetensors
    state_dict = load_file(checkpoint_path)

    visited = []

    # directly update weight in diffusers model
    for key in state_dict:
        # it is suggested to print out the key, it usually will be something like below
        # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"

        # as we have set the alpha beforehand, so just skip
        if ".alpha" in key or key in visited:
            continue

        if "text" in key:
            layer_infos = key.split(".")[0].split(lora_prefix_text_encoder + "_")[-1].split("_")
            curr_layer = pipeline.text_encoder
        else:
            layer_infos = key.split(".")[0].split(lora_prefix_unet + "_")[-1].split("_")
            curr_layer = pipeline.unet

        # find the target layer
        temp_name = layer_infos.pop(0)
        while len(layer_infos) > -1:
            try:
                curr_layer = curr_layer.__getattr__(temp_name)
                if len(layer_infos) > 0:
                    temp_name = layer_infos.pop(0)
                elif len(layer_infos) == 0:
                    break
            except Exception:
                if len(temp_name) > 0:
                    temp_name += "_" + layer_infos.pop(0)
                else:
                    temp_name = layer_infos.pop(0)

        pair_keys = []
        if "lora_down" in key:
            pair_keys.append(key.replace("lora_down", "lora_up"))
            pair_keys.append(key)
        else:
            pair_keys.append(key)
            pair_keys.append(key.replace("lora_up", "lora_down"))

        # update weight
        if len(state_dict[pair_keys[0]].shape) == 4:
            weight_up = state_dict[pair_keys[0]].squeeze(3).squeeze(2).to(torch.float32)
            weight_down = state_dict[pair_keys[1]].squeeze(3).squeeze(2).to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down).unsqueeze(2).unsqueeze(3)
        else:
            weight_up = state_dict[pair_keys[0]].to(torch.float32)
            weight_down = state_dict[pair_keys[1]].to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down)

        # update visited list
        for item in pair_keys:
            visited.append(item)

    return pipeline
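# Editor's note: the in-place update above is the standard LoRA merge
# W <- W + alpha * (U @ D), with U = "lora_up" of shape (out, r) and
# D = "lora_down" of shape (r, in); 1x1 conv factors are squeezed to 2-D for
# the matmul and unsqueezed back afterwards.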
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--base_model_path""", default=None, type=str, required=True, help="""Path to the base model in diffusers format."""
)
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert."""
)
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument(
"""--lora_prefix_unet""", default="""lora_unet""", type=str, help="""The prefix of UNet weight in safetensors"""
)
parser.add_argument(
"""--lora_prefix_text_encoder""",
default="""lora_te""",
type=str,
help="""The prefix of text encoder weight in safetensors""",
)
parser.add_argument("""--alpha""", default=0.75, type=float, help="""The merging ratio in W = W0 + alpha * deltaW""")
parser.add_argument(
"""--to_safetensors""", action="""store_true""", help="""Whether to store pipeline in safetensors format or not."""
)
parser.add_argument("""--device""", type=str, help="""Device to use (e.g. cpu, cuda:0, cuda:1, etc.)""")
    args = parser.parse_args()

    base_model_path = args.base_model_path
    checkpoint_path = args.checkpoint_path
    dump_path = args.dump_path
    lora_prefix_unet = args.lora_prefix_unet
    lora_prefix_text_encoder = args.lora_prefix_text_encoder
    alpha = args.alpha

    pipe = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)
    pipe = pipe.to(args.device)
    pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 14 | 1 |
def mf_knapsack(i, wt, val, j):
    """Memory-function (memoized) knapsack: only needed subproblems are solved."""
    global f  # a global dp table for knapsack
    if f[i][j] < 0:
        if j < wt[i - 1]:
            val = mf_knapsack(i - 1, wt, val, j)
        else:
            val = max(
                mf_knapsack(i - 1, wt, val, j),
                mf_knapsack(i - 1, wt, val, j - wt[i - 1]) + val[i - 1],
            )
        f[i][j] = val
    return f[i][j]


def knapsack(w, wt, val, n):
    dp = [[0] * (w + 1) for _ in range(n + 1)]

    for i in range(1, n + 1):
        for w_ in range(1, w + 1):
            if wt[i - 1] <= w_:
                dp[i][w_] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]], dp[i - 1][w_])
            else:
                dp[i][w_] = dp[i - 1][w_]

    return dp[n][w_], dp


def knapsack_with_example_solution(w, wt, val):
    """Solves the knapsack problem and also returns one optimal subset of items."""
    if not (isinstance(wt, (list, tuple)) and isinstance(val, (list, tuple))):
        raise ValueError(
            "Both the weights and values vectors must be either lists or tuples")

    num_items = len(wt)
    if num_items != len(val):
        msg = (
            "The number of weights must be the same as the number of values.\n"
            f"But got {num_items} weights and {len(val)} values"
        )
        raise ValueError(msg)
    for i in range(num_items):
        if not isinstance(wt[i], int):
            msg = (
                "All weights must be integers but got weight of "
                f"type {type(wt[i])} at index {i}"
            )
            raise TypeError(msg)

    optimal_val, dp_table = knapsack(w, wt, val, num_items)
    example_optional_set = set()
    _construct_solution(dp_table, wt, num_items, w, example_optional_set)

    return optimal_val, example_optional_set


def _construct_solution(dp, wt, i, j, optimal_set):
    """Recursively reconstructs one optimal subset from a filled DP table."""
    if i > 0 and j > 0:
        if dp[i - 1][j] == dp[i][j]:
            _construct_solution(dp, wt, i - 1, j, optimal_set)
        else:
            optimal_set.add(i)
            _construct_solution(dp, wt, i - 1, j - wt[i - 1], optimal_set)


if __name__ == "__main__":
    val = [3, 2, 4, 4]
    wt = [4, 3, 2, 3]
    n = 4
    w = 6
    f = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
    optimal_solution, _ = knapsack(w, wt, val, n)
    print(optimal_solution)
    print(mf_knapsack(n, wt, val, w))  # switched the n and w

    # testing the dynamic programming problem with example
    # the optimal subset for the above example are items 3 and 4
    optimal_solution, optimal_subset = knapsack_with_example_solution(w, wt, val)
    assert optimal_solution == 8
    assert optimal_subset == {3, 4}
    print("optimal_value = ", optimal_solution)
    print("An optimal subset corresponding to the optimal value", optimal_subset)
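# Editor's sketch (assumption: same recurrence as knapsack() above, written as
# a memoized recursion with functools instead of a global table):
from functools import lru_cache


def knapsack_recursive(w, wt, val):
    @lru_cache(maxsize=None)
    def best(i, cap):
        # best value using the first i items within capacity cap
        if i == 0 or cap == 0:
            return 0
        if wt[i - 1] > cap:
            return best(i - 1, cap)
        return max(best(i - 1, cap), best(i - 1, cap - wt[i - 1]) + val[i - 1])

    return best(len(wt), w)


assert knapsack_recursive(6, [4, 3, 2, 3], [3, 2, 4, 4]) == 8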
| 14 |
import os
import pytest
from transformers.dynamic_module_utils import get_imports
TOP_LEVEL_IMPORT = """
import os
"""
IMPORT_IN_FUNCTION = """
def foo():
import os
return False
"""
DEEPLY_NESTED_IMPORT = """
def foo():
def bar():
if True:
import os
return False
return bar()
"""
TOP_LEVEL_TRY_IMPORT = """
import os
try:
import bar
except ImportError:
raise ValueError()
"""
TRY_IMPORT_IN_FUNCTION = """
import os
def foo():
try:
import bar
except ImportError:
raise ValueError()
"""
MULTIPLE_EXCEPTS_IMPORT = """
import os
try:
import bar
except (ImportError, AttributeError):
raise ValueError()
"""
EXCEPT_AS_IMPORT = """
import os
try:
import bar
except ImportError as e:
raise ValueError()
"""
GENERIC_EXCEPT_IMPORT = """
import os
try:
import bar
except:
raise ValueError()
"""
MULTILINE_TRY_IMPORT = """
import os
try:
import bar
import baz
except ImportError:
raise ValueError()
"""
MULTILINE_BOTH_IMPORT = """
import os
try:
import bar
import baz
except ImportError:
x = 1
raise ValueError()
"""
CASES = [
TOP_LEVEL_IMPORT,
IMPORT_IN_FUNCTION,
DEEPLY_NESTED_IMPORT,
TOP_LEVEL_TRY_IMPORT,
GENERIC_EXCEPT_IMPORT,
MULTILINE_TRY_IMPORT,
MULTILINE_BOTH_IMPORT,
MULTIPLE_EXCEPTS_IMPORT,
EXCEPT_AS_IMPORT,
TRY_IMPORT_IN_FUNCTION,
]
@pytest.mark.parametrize("case", CASES)
def test_import_parsing(tmp_path, case):
    tmp_file_path = os.path.join(tmp_path, "test_file.py")
    with open(tmp_file_path, "w") as _tmp_file:
        _tmp_file.write(case)

    parsed_imports = get_imports(tmp_file_path)
    assert parsed_imports == ["os"]
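# Editor's note: across all the cases above, get_imports keeps top-level,
# function-local and nested imports but drops anything imported inside a
# try/except block, since those are treated as optional dependencies — which
# is why every case parses to exactly ["os"].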
| 14 | 1 |
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe.model")


class BartphoTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BartphoTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        vocab = ["▁This", "▁is", "▁a", "▁t", "est"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.monolingual_vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["monolingual_vocab_file"])
        with open(self.monolingual_vocab_file, "w", encoding="utf-8") as fp:
            for token in vocab_tokens:
                fp.write(f"{token} {vocab_tokens[token]}\n")

        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BartphoTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "This is a là test"
        output_text = "This is a<unk><unk> test"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        text = "This is a là test"
        bpe_tokens = "▁This ▁is ▁a ▁l à ▁t est".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 5, 6, 3, 3, 7, 8, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
| 14 |
def nor_gate(input_1: int, input_2: int) -> int:
    """
    >>> nor_gate(0, 0)
    1
    >>> nor_gate(1, 0)
    0
    """
    return int(input_1 == input_2 == 0)


def main() -> None:
    print("Truth Table of NOR Gate:")
    print("| Input 1 | Input 2 | Output |")
    print(f"| 0 | 0 | {nor_gate(0, 0)} |")
    print(f"| 0 | 1 | {nor_gate(0, 1)} |")
    print(f"| 1 | 0 | {nor_gate(1, 0)} |")
    print(f"| 1 | 1 | {nor_gate(1, 1)} |")
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
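    # Editor's check (not in the original): NOR is the negation of OR, so the
    # gate must satisfy De Morgan's identity on every input pair.
    for a in (0, 1):
        for b in (0, 1):
            assert nor_gate(a, b) == int(not (a or b))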
| 14 | 1 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""junnyu/roformer_chinese_small""": """https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt""",
"""junnyu/roformer_chinese_base""": """https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt""",
"""junnyu/roformer_chinese_char_small""": (
"""https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt"""
),
"""junnyu/roformer_chinese_char_base""": (
"""https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt"""
),
"""junnyu/roformer_small_discriminator""": (
"""https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt"""
),
"""junnyu/roformer_small_generator""": (
"""https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt"""
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""junnyu/roformer_chinese_small""": 1536,
"""junnyu/roformer_chinese_base""": 1536,
"""junnyu/roformer_chinese_char_small""": 512,
"""junnyu/roformer_chinese_char_base""": 512,
"""junnyu/roformer_small_discriminator""": 128,
"""junnyu/roformer_small_generator""": 128,
}
PRETRAINED_INIT_CONFIGURATION = {
"""junnyu/roformer_chinese_small""": {"""do_lower_case""": True},
"""junnyu/roformer_chinese_base""": {"""do_lower_case""": True},
"""junnyu/roformer_chinese_char_small""": {"""do_lower_case""": True},
"""junnyu/roformer_chinese_char_base""": {"""do_lower_case""": True},
"""junnyu/roformer_small_discriminator""": {"""do_lower_case""": True},
"""junnyu/roformer_small_generator""": {"""do_lower_case""": True},
}
class RoFormerTokenizerFast(PreTrainedTokenizerFast):
    r"""
    Construct a "fast" RoFormer tokenizer (backed by HuggingFace's *tokenizers* library).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RoFormerTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs)

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def __getstate__(self):
        state = self.__dict__.copy()
        state["_tokenizer"].pre_tokenizer = BertPreTokenizer()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        vocab = self.__dict__["_tokenizer"].get_vocab()
        self.__dict__["_tokenizer"].pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer(vocab))

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def save_pretrained(self, save_directory, legacy_format=None, filename_prefix=None, push_to_hub=False, **kwargs):
        self.backend_tokenizer.pre_tokenizer = BertPreTokenizer()
        return super().save_pretrained(save_directory, legacy_format, filename_prefix, push_to_hub, **kwargs)
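# Editor's note: the __getstate__/__setstate__ pair above exists because
# PreTokenizer.custom(...) wraps a Python object that cannot be pickled; state
# is saved with a plain BertPreTokenizer and the jieba-based pre-tokenizer is
# rebuilt from the vocab when the tokenizer is unpickled. save_pretrained does
# the same swap so the serialized tokenizer.json stays loadable.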
| 14 |
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
BERT_TEST_FILE = os.path.join("tests", "models", "bert", "test_modeling_bert.py")
BLIP_TEST_FILE = os.path.join("tests", "models", "blip", "test_modeling_blip.py")
class GetTestInfoTester(unittest.TestCase):
    def test_get_test_to_tester_mapping(self):
        bert_test_tester_mapping = get_test_to_tester_mapping(BERT_TEST_FILE)
        blip_test_tester_mapping = get_test_to_tester_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {"BertModelTest": "BertModelTester"}
        EXPECTED_BLIP_MAPPING = {
'''BlipModelTest''': '''BlipModelTester''',
'''BlipTextImageModelTest''': '''BlipTextImageModelsModelTester''',
'''BlipTextModelTest''': '''BlipTextModelTester''',
'''BlipTextRetrievalModelTest''': '''BlipTextRetrievalModelTester''',
'''BlipVQAModelTest''': '''BlipVQAModelTester''',
'''BlipVisionModelTest''': '''BlipVisionModelTester''',
}
        self.assertEqual(get_test_info.to_json(bert_test_tester_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_test_tester_mapping), EXPECTED_BLIP_MAPPING)
    def test_get_model_to_test_mapping(self):
        bert_model_test_mapping = get_model_to_test_mapping(BERT_TEST_FILE)
        blip_model_test_mapping = get_model_to_test_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {
'''BertForMaskedLM''': ['''BertModelTest'''],
'''BertForMultipleChoice''': ['''BertModelTest'''],
'''BertForNextSentencePrediction''': ['''BertModelTest'''],
'''BertForPreTraining''': ['''BertModelTest'''],
'''BertForQuestionAnswering''': ['''BertModelTest'''],
'''BertForSequenceClassification''': ['''BertModelTest'''],
'''BertForTokenClassification''': ['''BertModelTest'''],
'''BertLMHeadModel''': ['''BertModelTest'''],
'''BertModel''': ['''BertModelTest'''],
}
        EXPECTED_BLIP_MAPPING = {
'''BlipForConditionalGeneration''': ['''BlipTextImageModelTest'''],
'''BlipForImageTextRetrieval''': ['''BlipTextRetrievalModelTest'''],
'''BlipForQuestionAnswering''': ['''BlipVQAModelTest'''],
'''BlipModel''': ['''BlipModelTest'''],
'''BlipTextModel''': ['''BlipTextModelTest'''],
'''BlipVisionModel''': ['''BlipVisionModelTest'''],
}
        self.assertEqual(get_test_info.to_json(bert_model_test_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_model_test_mapping), EXPECTED_BLIP_MAPPING)
    def test_get_model_to_tester_mapping(self):
        bert_model_tester_mapping = get_model_to_tester_mapping(BERT_TEST_FILE)
        blip_model_tester_mapping = get_model_to_tester_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {
'''BertForMaskedLM''': ['''BertModelTester'''],
'''BertForMultipleChoice''': ['''BertModelTester'''],
'''BertForNextSentencePrediction''': ['''BertModelTester'''],
'''BertForPreTraining''': ['''BertModelTester'''],
'''BertForQuestionAnswering''': ['''BertModelTester'''],
'''BertForSequenceClassification''': ['''BertModelTester'''],
'''BertForTokenClassification''': ['''BertModelTester'''],
'''BertLMHeadModel''': ['''BertModelTester'''],
'''BertModel''': ['''BertModelTester'''],
}
        EXPECTED_BLIP_MAPPING = {
'''BlipForConditionalGeneration''': ['''BlipTextImageModelsModelTester'''],
'''BlipForImageTextRetrieval''': ['''BlipTextRetrievalModelTester'''],
'''BlipForQuestionAnswering''': ['''BlipVQAModelTester'''],
'''BlipModel''': ['''BlipModelTester'''],
'''BlipTextModel''': ['''BlipTextModelTester'''],
'''BlipVisionModel''': ['''BlipVisionModelTester'''],
}
        self.assertEqual(get_test_info.to_json(bert_model_tester_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_model_tester_mapping), EXPECTED_BLIP_MAPPING)
| 14 | 1 |
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
    is_bs4_available,
is_coloredlogs_available,
is_datasets_available,
    is_detectron2_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
    is_soundfile_availble,  # (sic) the misspelling is the actual upstream name, kept for compatibility
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
    is_tf2onnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
    is_torch_bf16_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
    is_torch_tf32_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
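
# Editor's note: a minimal usage sketch (not part of the original module). This file
# only re-exports names from `transformers.utils`, so legacy imports such as
# `from transformers.file_utils import ModelOutput` keep working unchanged.
if __name__ == "__main__":
    print("weights file name:", WEIGHTS_NAME)
    print("torch available:", is_torch_available())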
| 14 |
import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
)
from transformers.models.efficientformer.modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_vision_available():
from PIL import Image
from transformers import EfficientFormerImageProcessor
class TFEfficientFormerModelTester:
    def __init__(
        self,
        parent,
        batch_size: int = 13,
        image_size: int = 64,
        patch_size: int = 2,
        embed_dim: int = 3,
        num_channels: int = 3,
        is_training: bool = True,
        use_labels: bool = True,
        hidden_size: int = 128,
        hidden_sizes=[16, 32, 64, 128],
        num_hidden_layers: int = 7,
        num_attention_heads: int = 4,
        intermediate_size: int = 37,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        type_sequence_label_size: int = 10,
        initializer_range: float = 0.02,
        encoder_stride: int = 2,
        num_attention_outputs: int = 1,
        dim: int = 128,
        depths: List[int] = [2, 2, 2, 2],
        resolution: int = 2,
        mlp_expansion_ratio: int = 2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        self.num_attention_outputs = num_attention_outputs
        self.embed_dim = embed_dim
        self.seq_length = embed_dim + 1
        self.resolution = resolution
        self.depths = depths
        self.hidden_sizes = hidden_sizes
        self.dim = dim
        self.mlp_expansion_ratio = mlp_expansion_ratio
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        return EfficientFormerConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels,
            hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride, resolution=self.resolution, depths=self.depths, hidden_sizes=self.hidden_sizes,
            dim=self.dim, mlp_expansion_ratio=self.mlp_expansion_ratio,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = TFEfficientFormerModel(config=config)
        result = model(pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = TFEfficientFormerForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = TFEfficientFormerForImageClassification(config)

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFEfficientFormerModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFEfficientFormerModel,
            TFEfficientFormerForImageClassificationWithTeacher,
            TFEfficientFormerForImageClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFEfficientFormerModel,
            "image-classification": (
                TFEfficientFormerForImageClassification,
                TFEfficientFormerForImageClassificationWithTeacher,
            ),
        }
        if is_tf_available()
        else {}
    )
    fx_compatible = False

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFEfficientFormerModelTester(self)
        self.config_tester = ConfigTester(
            self, config_class=EfficientFormerConfig, has_text_modality=False, hidden_size=37
        )
    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="EfficientFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="EfficientFormer does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_layers = getattr(
                self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
            )
            self.assertEqual(len(hidden_states), expected_num_layers)

            if hasattr(self.model_tester, "encoder_seq_length"):
                seq_length = self.model_tester.encoder_seq_length
                if hasattr(self.model_tester, "chunk_length") and self.model_tester.chunk_length > 1:
                    seq_length = seq_length * self.model_tester.chunk_length
            else:
                seq_length = self.model_tester.seq_length

            self.assertListEqual(
                list(hidden_states[-1].shape[-2:]),
                [seq_length, self.model_tester.hidden_size],
            )

            if config.is_encoder_decoder:
                hidden_states = outputs.decoder_hidden_states

                self.assertIsInstance(hidden_states, (list, tuple))
                self.assertEqual(len(hidden_states), expected_num_layers)
                seq_len = getattr(self.model_tester, "seq_length", None)
                decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len)

                self.assertListEqual(
                    list(hidden_states[-1].shape[-2:]),
                    [decoder_seq_length, self.model_tester.hidden_size],
                )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
                del inputs_dict["labels"]

        return inputs_dict
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @unittest.skip(reason="EfficientFormer does not implement masked image modeling yet")
    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFEfficientFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        seq_len = getattr(self.model_tester, "seq_length", None)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)
        chunk_length = getattr(self.model_tester, "chunk_length", None)
        if chunk_length is not None and hasattr(self.model_tester, "num_hashes"):
            encoder_seq_length = encoder_seq_length * self.model_tester.num_hashes

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_attention_outputs)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_attention_outputs)

            if chunk_length is not None:
                self.assertListEqual(
                    list(attentions[0].shape[-4:]),
                    [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length],
                )
            else:
                self.assertListEqual(
                    list(attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length],
                )
    def test_compile_tf_model(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            # Prepare our model
            model = model_class(config)
            # These are maximally general inputs for the model, with multiple None dimensions
            # Hopefully this will catch any conditionals that fail for flexible shapes
            functional_inputs = {
                key: tf.keras.Input(shape=val.shape[1:], dtype=val.dtype, name=key)
                for key, val in model.input_signature.items()
                if key in model.dummy_inputs
            }

            outputs_dict = model(functional_inputs)
            self.assertTrue(outputs_dict is not None)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class TFEfficientFormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            EfficientFormerImageProcessor.from_pretrained("snap-research/efficientformer-l1-300")
            if is_vision_available()
            else None
        )
    @slow
    def test_inference_image_classification_head(self):
        model = TFEfficientFormerForImageClassification.from_pretrained("snap-research/efficientformer-l1-300")
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs, training=False)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1_000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-0.0555, 0.4825, -0.0852])
        self.assertTrue(np.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
    @slow
    def test_inference_image_classification_head_with_teacher(self):
        model = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
            "snap-research/efficientformer-l1-300"
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs, training=False)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1_000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-0.1312, 0.4353, -1.0499])
        self.assertTrue(np.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
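
# Editor's note: illustrative sketch (not part of the original test file) showing the
# same inference flow outside the unittest harness. It assumes the TF and vision
# extras are installed; the checkpoint name matches the one used in the tests above.
if __name__ == "__main__":
    processor = EfficientFormerImageProcessor.from_pretrained("snap-research/efficientformer-l1-300")
    model = TFEfficientFormerForImageClassification.from_pretrained("snap-research/efficientformer-l1-300")
    inputs = processor(images=prepare_img(), return_tensors="tf")
    logits = model(**inputs, training=False).logits
    print("predicted class id:", int(tf.math.argmax(logits, axis=-1)[0]))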
| 14 | 1 |