"""simple docstring"""
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("""TEST_SAGEMAKER""" ,"""False""" ) ) is not True ,reason="""Skipping test because should only be run when releasing minor transformers version""" ,)
@pytest.mark.usefixtures("""sm_env""" )
@parameterized_class(
[
{
"""framework""": """pytorch""",
"""script""": """run_glue.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.p3.16xlarge""",
"""results""": {"""train_runtime""": 650, """eval_accuracy""": 0.7, """eval_loss""": 0.6},
},
{
"""framework""": """pytorch""",
"""script""": """run_ddp.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.p3.16xlarge""",
"""results""": {"""train_runtime""": 600, """eval_accuracy""": 0.7, """eval_loss""": 0.6},
},
{
"""framework""": """tensorflow""",
"""script""": """run_tf_dist.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.p3.16xlarge""",
"""results""": {"""train_runtime""": 600, """eval_accuracy""": 0.6, """eval_loss""": 0.7},
},
] )
class A_ (unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase__ ( self ):
"""simple docstring"""
if self.framework == "pytorch":
subprocess.run(
F"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding="utf-8" , check=lowercase_ , )
assert hasattr(self , "env" )
def UpperCamelCase__ ( self , lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : Dict = F"""{self.env.base_job_name}-{instance_count}-{"ddp" if "ddp" in self.script else "smd"}"""
# distributed data settings
UpperCAmelCase_ : Optional[Any] = {"smdistributed": {"dataparallel": {"enabled": True}}} if self.script != "run_ddp.py" else None
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=lowercase_ , instance_count=lowercase_ , instance_type=self.instance_type , debugger_hook_config=lowercase_ , hyperparameters={**self.env.distributed_hyperparameters, "model_name_or_path": self.model_name_or_path} , metric_definitions=self.env.metric_definitions , distribution=lowercase_ , py_version="py36" , )
def UpperCamelCase__ ( self , lowercase_ ):
"""simple docstring"""
TrainingJobAnalytics(lowercase_ ).export_csv(F"""{self.env.test_path}/{job_name}_metrics.csv""" )
@parameterized.expand([(2,)] )
def UpperCamelCase__ ( self , lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = self.create_estimator(lowercase_ )
# run training
estimator.fit()
# result dataframe
UpperCAmelCase_ : Union[str, Any] = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
UpperCAmelCase_ : Dict = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"] )
UpperCAmelCase_ : Optional[Any] = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
UpperCAmelCase_ : Optional[int] = (
Session().describe_training_job(estimator.latest_training_job.name ).get("TrainingTimeInSeconds" , 99_9999 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy )
assert all(t <= self.results["eval_loss"] for t in eval_loss )
# dump tests result into json file to share in PR
with open(F"""{estimator.latest_training_job.name}.json""" , "w" ) as outfile:
json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss} , lowercase_ )
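
# Note on the parameterization above (explanatory comment, not in the original
# test): `@parameterized_class` generates one TestCase subclass per dict, with
# each key attached as a class attribute -- which is why the test body can read
# self.framework, self.script, self.instance_type and self.results directly.
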
"""simple docstring"""
import datasets
_a = '\\n@InProceedings{conneau2018xnli,\n author = "Conneau, Alexis\n and Rinott, Ruty\n and Lample, Guillaume\n and Williams, Adina\n and Bowman, Samuel R.\n and Schwenk, Holger\n and Stoyanov, Veselin",\n title = "XNLI: Evaluating Cross-lingual Sentence Representations",\n booktitle = "Proceedings of the 2018 Conference on Empirical Methods\n in Natural Language Processing",\n year = "2018",\n publisher = "Association for Computational Linguistics",\n location = "Brussels, Belgium",\n}\n'
_a = '\\nXNLI is a subset of a few thousand examples from MNLI which has been translated\ninto a 14 different languages (some low-ish resource). As with MNLI, the goal is\nto predict textual entailment (does sentence A imply/contradict/neither sentence\nB) and is a classification task (given two sentences, predict one of three\nlabels).\n'
_a = '\nComputes XNLI score which is just simple accuracy.\nArgs:\n predictions: Predicted labels.\n references: Ground truth labels.\nReturns:\n \'accuracy\': accuracy\nExamples:\n\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> xnli_metric = datasets.load_metric("xnli")\n >>> results = xnli_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n'
def __a ( __lowerCamelCase, __lowerCamelCase ):
return (preds == labels).mean()
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class A_ (datasets.Metric ):
'''simple docstring'''
def UpperCamelCase__ ( self ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("int64" if self.config_name != "sts-b" else "float32" ),
"references": datasets.Value("int64" if self.config_name != "sts-b" else "float32" ),
} ) , codebase_urls=[] , reference_urls=[] , format="numpy" , )
def UpperCamelCase__ ( self , lowercase_ , lowercase_ ):
"""simple docstring"""
return {"accuracy": simple_accuracy(lowercase_ , lowercase_ )}
"""simple docstring"""
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class A_ (unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = 10
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : List[str] = [1, 2, 3, 4]
UpperCAmelCase_ : Optional[int] = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
self.assertEqual(truncate_or_pad(lowercase_ , self.block_size , 0 ) , lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : str = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
UpperCAmelCase_ : str = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(lowercase_ , self.block_size , 0 ) , lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Any = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
UpperCAmelCase_ : Optional[int] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(lowercase_ , self.block_size , 0 ) , lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = "It was the year of Our Lord one thousand seven hundred and\n seventy-five.\n\nSpiritual revelations were conceded to England at that\n favoured period, as at this."
UpperCAmelCase_ : Optional[int] = process_story(lowercase_ )
self.assertEqual(lowercase_ , [] )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : List[Any] = ""
UpperCAmelCase_ : Optional[int] = process_story(lowercase_ )
self.assertEqual(lowercase_ , [] )
self.assertEqual(lowercase_ , [] )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : str = (
"It was the year of Our Lord one thousand seven hundred and "
"seventy-five\n\nSpiritual revelations were conceded to England "
"at that favoured period, as at this.\n@highlight\n\nIt was the best of times"
)
UpperCAmelCase_ : Optional[int] = process_story(lowercase_ )
UpperCAmelCase_ : str = [
"It was the year of Our Lord one thousand seven hundred and seventy-five.",
"Spiritual revelations were conceded to England at that favoured period, as at this.",
]
self.assertEqual(lowercase_ , lowercase_ )
UpperCAmelCase_ : str = ["It was the best of times."]
self.assertEqual(lowercase_ , lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : List[Any] = torch.tensor([1, 2, 3, 4] )
UpperCAmelCase_ : List[str] = torch.tensor([1, 1, 1, 1] )
np.testing.assert_array_equal(build_mask(lowercase_ , 0 ).numpy() , expected.numpy() )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : List[str] = torch.tensor([1, 2, 3, 4, 23, 23, 23] )
UpperCAmelCase_ : int = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(lowercase_ , 23 ).numpy() , expected.numpy() )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : int = torch.tensor([8, 2, 3, 4, 1, 1, 1] )
UpperCAmelCase_ : Optional[Any] = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(lowercase_ , 1 ).numpy() , expected.numpy() )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = 101
UpperCAmelCase_ : Tuple = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]] )
UpperCAmelCase_ : Tuple = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] )
UpperCAmelCase_ : Optional[int] = compute_token_type_ids(lowercase_ , lowercase_ )
np.testing.assert_array_equal(lowercase_ , lowercase_ )
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
_a = logging.get_logger(__name__)
class A_ (lowercase__ ):
'''simple docstring'''
def __init__( self , lowercase_ , lowercase_ , lowercase_ , **lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = feature_size
UpperCAmelCase_ : Any = sampling_rate
UpperCAmelCase_ : Any = padding_value
UpperCAmelCase_ : str = kwargs.pop("padding_side" , "right" )
UpperCAmelCase_ : List[str] = kwargs.pop("return_attention_mask" , lowercase_ )
super().__init__(**lowercase_ )
def UpperCamelCase__ ( self , lowercase_ , lowercase_ = True , lowercase_ = None , lowercase_ = False , lowercase_ = None , lowercase_ = None , lowercase_ = None , ):
"""simple docstring"""
# If we have a list of dicts, let's convert it in a dict of lists
# We do this to allow using this method as a collate_fn function in PyTorch Dataloader
if isinstance(lowercase_ , (list, tuple) ) and isinstance(processed_features[0] , (dict, BatchFeature) ):
UpperCAmelCase_ : Dict = {
key: [example[key] for example in processed_features] for key in processed_features[0].keys()
}
# The model's main input name, usually `input_values`, has be passed for padding
if self.model_input_names[0] not in processed_features:
raise ValueError(
"You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`"
F""" to this method that includes {self.model_input_names[0]}, but you provided"""
F""" {list(processed_features.keys() )}""" )
UpperCAmelCase_ : Tuple = processed_features[self.model_input_names[0]]
UpperCAmelCase_ : List[str] = (
return_attention_mask if return_attention_mask is not None else self.return_attention_mask
)
if len(lowercase_ ) == 0:
if return_attention_mask:
UpperCAmelCase_ : Union[str, Any] = []
return processed_features
# If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
# and rebuild them afterwards if no return_tensors is specified
# Note that we lose the specific device the tensor may be on for PyTorch
UpperCAmelCase_ : List[str] = required_input[0]
if isinstance(lowercase_ , (list, tuple) ):
# first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
UpperCAmelCase_ : Any = 0
while len(required_input[index] ) == 0:
index += 1
if index < len(lowercase_ ):
UpperCAmelCase_ : Optional[Any] = required_input[index][0]
if return_tensors is None:
if is_tf_tensor(lowercase_ ):
UpperCAmelCase_ : Dict = "tf"
elif is_torch_tensor(lowercase_ ):
UpperCAmelCase_ : Any = "pt"
elif isinstance(lowercase_ , (int, float, list, tuple, np.ndarray) ):
UpperCAmelCase_ : str = "np"
else:
raise ValueError(
F"""type of {first_element} unknown: {type(lowercase_ )}. """
"Should be one of a python, numpy, pytorch or tensorflow object." )
for key, value in processed_features.items():
if isinstance(value[0] , (int, float) ):
UpperCAmelCase_ : Optional[int] = to_numpy(lowercase_ )
else:
UpperCAmelCase_ : List[str] = [to_numpy(lowercase_ ) for v in value]
# Convert padding_strategy in PaddingStrategy
UpperCAmelCase_ : Dict = self._get_padding_strategies(padding=lowercase_ , max_length=lowercase_ )
UpperCAmelCase_ : str = processed_features[self.model_input_names[0]]
UpperCAmelCase_ : int = len(lowercase_ )
if not all(len(lowercase_ ) == batch_size for v in processed_features.values() ):
raise ValueError("Some items in the output dictionary have a different batch size than others." )
UpperCAmelCase_ : int = []
for i in range(lowercase_ ):
UpperCAmelCase_ : str = {k: v[i] for k, v in processed_features.items()}
# truncation
UpperCAmelCase_ : List[str] = self._truncate(
lowercase_ , max_length=lowercase_ , pad_to_multiple_of=lowercase_ , truncation=lowercase_ , )
truncated_inputs.append(lowercase_ )
if padding_strategy == PaddingStrategy.LONGEST:
# make sure that `max_length` cannot be longer than the longest truncated length
UpperCAmelCase_ : str = max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs )
UpperCAmelCase_ : Dict = PaddingStrategy.MAX_LENGTH
UpperCAmelCase_ : List[str] = {}
for i in range(lowercase_ ):
# padding
UpperCAmelCase_ : int = self._pad(
truncated_inputs[i] , max_length=lowercase_ , padding_strategy=lowercase_ , pad_to_multiple_of=lowercase_ , return_attention_mask=lowercase_ , )
for key, value in outputs.items():
if key not in batch_outputs:
UpperCAmelCase_ : Any = []
if value.dtype is np.dtype(np.floataa ):
UpperCAmelCase_ : List[Any] = value.astype(np.floataa )
batch_outputs[key].append(lowercase_ )
return BatchFeature(lowercase_ , tensor_type=lowercase_ )
def UpperCamelCase__ ( self , lowercase_ , lowercase_ = None , lowercase_ = PaddingStrategy.DO_NOT_PAD , lowercase_ = None , lowercase_ = None , ):
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = processed_features[self.model_input_names[0]]
if padding_strategy == PaddingStrategy.LONGEST:
UpperCAmelCase_ : Tuple = len(lowercase_ )
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
UpperCAmelCase_ : Tuple = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
UpperCAmelCase_ : Dict = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(lowercase_ ) < max_length
if return_attention_mask and "attention_mask" not in processed_features:
UpperCAmelCase_ : Optional[int] = np.ones(len(lowercase_ ) , dtype=np.intaa )
if needs_to_be_padded:
UpperCAmelCase_ : Dict = max_length - len(lowercase_ )
if self.padding_side == "right":
if return_attention_mask:
UpperCAmelCase_ : List[Any] = np.pad(
processed_features["attention_mask"] , (0, difference) )
UpperCAmelCase_ : Dict = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
UpperCAmelCase_ : Optional[Any] = np.pad(
lowercase_ , lowercase_ , "constant" , constant_values=self.padding_value )
elif self.padding_side == "left":
if return_attention_mask:
UpperCAmelCase_ : Optional[Any] = np.pad(
processed_features["attention_mask"] , (difference, 0) )
UpperCAmelCase_ : Dict = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
UpperCAmelCase_ : str = np.pad(
lowercase_ , lowercase_ , "constant" , constant_values=self.padding_value )
else:
raise ValueError("Invalid padding strategy:" + str(self.padding_side ) )
return processed_features
def UpperCamelCase__ ( self , lowercase_ , lowercase_ = None , lowercase_ = None , lowercase_ = None , ):
"""simple docstring"""
if not truncation:
return processed_features
elif truncation and max_length is None:
raise ValueError("When setting ``truncation=True``, make sure that ``max_length`` is defined." )
UpperCAmelCase_ : Optional[int] = processed_features[self.model_input_names[0]]
# find `max_length` that fits `pad_to_multiple_of`
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
UpperCAmelCase_ : Union[str, Any] = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
UpperCAmelCase_ : Optional[Any] = len(lowercase_ ) > max_length
if needs_to_be_truncated:
UpperCAmelCase_ : int = processed_features[self.model_input_names[0]][:max_length]
if "attention_mask" in processed_features:
UpperCAmelCase_ : Dict = processed_features["attention_mask"][:max_length]
return processed_features
def UpperCamelCase__ ( self , lowercase_=False , lowercase_=None ):
"""simple docstring"""
# Get padding strategy
if padding is not False:
if padding is True:
UpperCAmelCase_ : Dict = PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch
elif not isinstance(lowercase_ , lowercase_ ):
UpperCAmelCase_ : Optional[Any] = PaddingStrategy(lowercase_ )
elif isinstance(lowercase_ , lowercase_ ):
UpperCAmelCase_ : int = padding
else:
UpperCAmelCase_ : str = PaddingStrategy.DO_NOT_PAD
# Set max length if needed
if max_length is None:
if padding_strategy == PaddingStrategy.MAX_LENGTH:
raise ValueError(
F"""When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined""" )
# Test if we have a padding value
if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
raise ValueError(
"Asking to pad but the feature_extractor does not have a padding value. Please select a value to use"
" as `padding_value`. For example: `feature_extractor.padding_value = 0.0`." )
return padding_strategy
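
# Usage sketch (illustration, not part of the original module):
# `SequenceFeatureExtractor` is an abstract base; concrete audio extractors
# (e.g. Wav2Vec2FeatureExtractor) subclass it and define `model_input_names`.
# Assuming a hypothetical subclass `MyExtractor` with
# `model_input_names = ["input_values"]`, padding behaves like this:
#
#   extractor = MyExtractor(feature_size=1, sampling_rate=16000, padding_value=0.0)
#   batch = extractor.pad(
#       {"input_values": [[0.1, 0.2, 0.3], [0.4]]},
#       padding="longest",
#       return_tensors="np",
#   )
#   # batch["input_values"].shape == (2, 3): the short sequence is right-padded
#   # with 0.0, and batch["attention_mask"] marks the real frames with 1s.
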
from __future__ import annotations


def kmp(pattern: str, text: str) -> bool:
    """
    The Knuth-Morris-Pratt algorithm for finding a pattern within a piece of text,
    with complexity O(n + m).

    1) Preprocess the pattern to identify any suffixes that are identical to
       prefixes. This tells us where to continue from if we get a mismatch
       between a character in our pattern and the text.
    2) Step through the text one character at a time and compare it to a
       character in the pattern, updating our location within the pattern if
       necessary.
    """
    # 1) Construct the failure array
    failure = get_failure_array(pattern)

    # 2) Step through text searching for pattern
    i, j = 0, 0  # index into text, pattern
    while i < len(text):
        if pattern[j] == text[i]:
            if j == (len(pattern) - 1):
                return True
            j += 1

        # if this is a prefix in our pattern
        # just go back far enough to continue
        elif j > 0:
            j = failure[j - 1]
            continue
        i += 1
    return False


def get_failure_array(pattern: str) -> list[int]:
    """
    Calculates the new index we should go to if we fail a comparison.
    """
    failure = [0]
    i = 0
    j = 1
    while j < len(pattern):
        if pattern[i] == pattern[j]:
            i += 1
        elif i > 0:
            i = failure[i - 1]
            continue
        j += 1
        failure.append(i)
    return failure


if __name__ == "__main__":
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert kmp(pattern, text1) and not kmp(pattern, text2)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert kmp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert kmp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert kmp(pattern, text)

    # Test 5)
    pattern = "aabaabaaa"
    assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
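
# Worked example of the failure function (illustration, not from the original
# file): for pattern "aabaabaaa" the array checked in Test 5 is
#
#   index:    0  1  2  3  4  5  6  7  8
#   pattern:  a  a  b  a  a  b  a  a  a
#   failure:  0  1  0  1  2  3  4  5  2
#
# failure[7] == 5 says the 8-character prefix "aabaabaa" ends in a 5-character
# suffix ("aabaa") that is also a prefix, so after a mismatch at pattern index
# 8 the search resumes at index 5 instead of restarting from 0.
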
"""simple docstring"""
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class A_ (unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : str = torch.nn.Linear(10 , 10 )
UpperCAmelCase_ : List[str] = torch.optim.SGD(model.parameters() , 0.1 )
UpperCAmelCase_ : Optional[Any] = Accelerator()
UpperCAmelCase_ : Tuple = accelerator.prepare(lowercase_ )
try:
pickle.loads(pickle.dumps(lowercase_ ) )
except Exception as e:
self.fail(F"""Accelerated optimizer pickling failed with {e}""" )
AcceleratorState._reset_state()
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_a = logging.get_logger(__name__)
_a = {
'asapp/sew-d-tiny-100k': 'https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json',
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class A_ (lowercase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Any = """sew-d"""
def __init__( self , lowercase_=32 , lowercase_=768 , lowercase_=12 , lowercase_=12 , lowercase_=3072 , lowercase_=2 , lowercase_=512 , lowercase_=256 , lowercase_=True , lowercase_=True , lowercase_=("p2c", "c2p") , lowercase_="layer_norm" , lowercase_="gelu_python" , lowercase_=0.1 , lowercase_=0.1 , lowercase_=0.1 , lowercase_=0.0 , lowercase_=0.1 , lowercase_=0.02 , lowercase_=1E-7 , lowercase_=1E-5 , lowercase_="group" , lowercase_="gelu" , lowercase_=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , lowercase_=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , lowercase_=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , lowercase_=False , lowercase_=128 , lowercase_=16 , lowercase_=True , lowercase_=0.05 , lowercase_=10 , lowercase_=2 , lowercase_=0.0 , lowercase_=10 , lowercase_=0 , lowercase_="mean" , lowercase_=False , lowercase_=False , lowercase_=256 , lowercase_=0 , lowercase_=1 , lowercase_=2 , **lowercase_ , ):
"""simple docstring"""
super().__init__(**lowercase_ , pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ )
UpperCAmelCase_ : Tuple = hidden_size
UpperCAmelCase_ : Dict = feat_extract_norm
UpperCAmelCase_ : Tuple = feat_extract_activation
UpperCAmelCase_ : str = list(lowercase_ )
UpperCAmelCase_ : Tuple = list(lowercase_ )
UpperCAmelCase_ : List[str] = list(lowercase_ )
UpperCAmelCase_ : Dict = conv_bias
UpperCAmelCase_ : int = num_conv_pos_embeddings
UpperCAmelCase_ : str = num_conv_pos_embedding_groups
UpperCAmelCase_ : int = len(self.conv_dim )
UpperCAmelCase_ : List[str] = num_hidden_layers
UpperCAmelCase_ : Optional[int] = intermediate_size
UpperCAmelCase_ : Tuple = squeeze_factor
UpperCAmelCase_ : Tuple = max_position_embeddings
UpperCAmelCase_ : Tuple = position_buckets
UpperCAmelCase_ : Optional[int] = share_att_key
UpperCAmelCase_ : Dict = relative_attention
UpperCAmelCase_ : List[Any] = norm_rel_ebd
UpperCAmelCase_ : Optional[int] = list(lowercase_ )
UpperCAmelCase_ : List[str] = hidden_act
UpperCAmelCase_ : Union[str, Any] = num_attention_heads
UpperCAmelCase_ : List[str] = hidden_dropout
UpperCAmelCase_ : Optional[int] = attention_dropout
UpperCAmelCase_ : Optional[Any] = activation_dropout
UpperCAmelCase_ : Tuple = feat_proj_dropout
UpperCAmelCase_ : Optional[int] = final_dropout
UpperCAmelCase_ : Optional[int] = layer_norm_eps
UpperCAmelCase_ : Union[str, Any] = feature_layer_norm_eps
UpperCAmelCase_ : Optional[Any] = initializer_range
UpperCAmelCase_ : int = vocab_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect."
"It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,"
F"""but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)"""
F"""= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
UpperCAmelCase_ : Optional[int] = apply_spec_augment
UpperCAmelCase_ : Dict = mask_time_prob
UpperCAmelCase_ : Optional[Any] = mask_time_length
UpperCAmelCase_ : List[str] = mask_time_min_masks
UpperCAmelCase_ : Dict = mask_feature_prob
UpperCAmelCase_ : int = mask_feature_length
UpperCAmelCase_ : Any = mask_feature_min_masks
# ctc loss
UpperCAmelCase_ : Tuple = ctc_loss_reduction
UpperCAmelCase_ : Tuple = ctc_zero_infinity
# sequence classification
UpperCAmelCase_ : Any = use_weighted_layer_sum
UpperCAmelCase_ : Union[str, Any] = classifier_proj_size
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return functools.reduce(operator.mul , self.conv_stride , 1 )
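
# Quick sanity check (illustration, not part of the original file): with the
# default conv_stride of (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) the feature
# encoder downsamples by 5 * 2**6 = 320, so:
#
#   config = SEWDConfig()
#   assert config.inputs_to_logits_ratio == 320  # one logit frame per 320
#                                                # samples (20 ms at 16 kHz)
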
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_a = logging.get_logger(__name__)
_a = {'ctrl': 'https://huggingface.co/ctrl/resolve/main/config.json'}
class A_ (lowercase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict = """ctrl"""
SCREAMING_SNAKE_CASE__ : Optional[int] = ["""past_key_values"""]
SCREAMING_SNAKE_CASE__ : List[str] = {
"""max_position_embeddings""": """n_positions""",
"""hidden_size""": """n_embd""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self , lowercase_=24_6534 , lowercase_=256 , lowercase_=1280 , lowercase_=8192 , lowercase_=48 , lowercase_=16 , lowercase_=0.1 , lowercase_=0.1 , lowercase_=1E-6 , lowercase_=0.02 , lowercase_=True , **lowercase_ , ):
"""simple docstring"""
UpperCAmelCase_ : Tuple = vocab_size
UpperCAmelCase_ : Union[str, Any] = n_positions
UpperCAmelCase_ : List[str] = n_embd
UpperCAmelCase_ : Dict = n_layer
UpperCAmelCase_ : Optional[int] = n_head
UpperCAmelCase_ : List[str] = dff
UpperCAmelCase_ : Tuple = resid_pdrop
UpperCAmelCase_ : Optional[Any] = embd_pdrop
UpperCAmelCase_ : str = layer_norm_epsilon
UpperCAmelCase_ : List[str] = initializer_range
UpperCAmelCase_ : List[str] = use_cache
super().__init__(**lowercase_ )
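
# Note on `attribute_map` (illustration, not part of the original file): it
# aliases the generic config names onto CTRL's historical ones, so both
# accessors below read the same underlying value.
#
#   config = CTRLConfig()
#   assert config.hidden_size == config.n_embd == 1280
#   assert config.num_hidden_layers == config.n_layer == 48
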
"""simple docstring"""
def __a ( __lowerCamelCase ):
assert isinstance(__lowerCamelCase, __lowerCamelCase ), f"""The input value of [n={number}] is not an integer"""
if number == 1:
return 2
elif number < 1:
UpperCAmelCase_ : str = f"""The input value of [n={number}] has to be > 0"""
raise ValueError(__lowerCamelCase )
else:
UpperCAmelCase_ : List[str] = sylvester(number - 1 )
UpperCAmelCase_ : List[str] = num - 1
UpperCAmelCase_ : List[str] = num
return lower * upper + 1
if __name__ == "__main__":
print(f"""The 8th number in Sylvester's sequence: {sylvester(8)}""")
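
# Worked recurrence (illustration, not part of the original file): the code
# computes S(n) = (S(n-1) - 1) * S(n-1) + 1 with S(1) = 2, giving
#
#   S(2) = 1 * 2 + 1   = 3
#   S(3) = 2 * 3 + 1   = 7
#   S(4) = 6 * 7 + 1   = 43
#   S(5) = 42 * 43 + 1 = 1807
#
# Each term is one more than the product of all previous terms.
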
def average_absolute_deviation(nums: list[int]) -> float:
    """
    Return the average absolute deviation of a list of numbers.

    >>> average_absolute_deviation([4, 1, 3, 2])
    1.0
    """
    if not nums:  # Makes sure that the list is not empty
        raise ValueError("List is empty")

    average = sum(nums) / len(nums)  # Calculate the average
    return sum(abs(x - average) for x in nums) / len(nums)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
"""simple docstring"""
import random
import unittest
import torch
from diffusers import IFImgaImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class A_ (lowercase__ ,lowercase__ ,unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[str] = IFImgaImgSuperResolutionPipeline
SCREAMING_SNAKE_CASE__ : Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""width""", """height"""}
SCREAMING_SNAKE_CASE__ : List[str] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"""original_image"""} )
SCREAMING_SNAKE_CASE__ : List[Any] = PipelineTesterMixin.required_optional_params - {"""latents"""}
def UpperCamelCase__ ( self ):
"""simple docstring"""
return self._get_superresolution_dummy_components()
def UpperCamelCase__ ( self , lowercase_ , lowercase_=0 ):
"""simple docstring"""
if str(lowercase_ ).startswith("mps" ):
UpperCAmelCase_ : Optional[Any] = torch.manual_seed(lowercase_ )
else:
UpperCAmelCase_ : Union[str, Any] = torch.Generator(device=lowercase_ ).manual_seed(lowercase_ )
UpperCAmelCase_ : Any = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowercase_ ) ).to(lowercase_ )
UpperCAmelCase_ : Optional[int] = floats_tensor((1, 3, 16, 16) , rng=random.Random(lowercase_ ) ).to(lowercase_ )
UpperCAmelCase_ : int = {
"prompt": "A painting of a squirrel eating a burger",
"image": image,
"original_image": original_image,
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def UpperCamelCase__ ( self ):
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def UpperCamelCase__ ( self ):
"""simple docstring"""
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != "cuda" , reason="float16 requires CUDA" )
def UpperCamelCase__ ( self ):
"""simple docstring"""
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1E-1 )
def UpperCamelCase__ ( self ):
"""simple docstring"""
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def UpperCamelCase__ ( self ):
"""simple docstring"""
self._test_save_load_local()
def UpperCamelCase__ ( self ):
"""simple docstring"""
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
"""simple docstring"""
from typing import TYPE_CHECKING
from ..utils import _LazyModule
_a = {
'config': [
'EXTERNAL_DATA_FORMAT_SIZE_LIMIT',
'OnnxConfig',
'OnnxConfigWithPast',
'OnnxSeq2SeqConfigWithPast',
'PatchingSpec',
],
'convert': ['export', 'validate_model_outputs'],
'features': ['FeaturesManager'],
'utils': ['ParameterFormat', 'compute_serialized_parameters_size'],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
OnnxSeqaSeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
_a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
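
# How the lazy import behaves (sketch; standard `_LazyModule` semantics in
# transformers): replacing the entry in `sys.modules` means the submodules in
# `_import_structure` are only imported on first attribute access, e.g.
#
#   import transformers.onnx
#   cfg_cls = transformers.onnx.OnnxConfig  # .config is imported only here
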
"""simple docstring"""
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class A_ (unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = "ylacombe/bark-small"
UpperCAmelCase_ : Union[str, Any] = tempfile.mkdtemp()
UpperCAmelCase_ : List[str] = "en_speaker_1"
UpperCAmelCase_ : Tuple = "This is a test string"
UpperCAmelCase_ : List[Any] = "speaker_embeddings_path.json"
UpperCAmelCase_ : Any = "speaker_embeddings"
def UpperCamelCase__ ( self , **lowercase_ ):
"""simple docstring"""
return AutoTokenizer.from_pretrained(self.checkpoint , **lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Tuple = self.get_tokenizer()
UpperCAmelCase_ : Union[str, Any] = BarkProcessor(tokenizer=lowercase_ )
processor.save_pretrained(self.tmpdirname )
UpperCAmelCase_ : Optional[int] = BarkProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
processor.save_pretrained(
self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
UpperCAmelCase_ : Dict = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
UpperCAmelCase_ : Union[str, Any] = BarkProcessor.from_pretrained(
self.tmpdirname , self.speaker_embeddings_dict_path , bos_token="(BOS)" , eos_token="(EOS)" , )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Dict = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
UpperCAmelCase_ : int = 35
UpperCAmelCase_ : Optional[Any] = 2
UpperCAmelCase_ : List[Any] = 8
UpperCAmelCase_ : Optional[Any] = {
"semantic_prompt": np.ones(lowercase_ ),
"coarse_prompt": np.ones((nb_codebooks_coarse, seq_len) ),
"fine_prompt": np.ones((nb_codebooks_total, seq_len) ),
}
# test providing already loaded voice_preset
UpperCAmelCase_ : Dict = processor(text=self.input_string , voice_preset=lowercase_ )
UpperCAmelCase_ : List[str] = inputs["history_prompt"]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(lowercase_ , np.array([] ) ).tolist() )
# test loading voice preset from npz file
UpperCAmelCase_ : Tuple = os.path.join(self.tmpdirname , "file.npz" )
np.savez(lowercase_ , **lowercase_ )
UpperCAmelCase_ : Optional[int] = processor(text=self.input_string , voice_preset=lowercase_ )
UpperCAmelCase_ : List[str] = inputs["history_prompt"]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(lowercase_ , np.array([] ) ).tolist() )
# test loading voice preset from the hub
UpperCAmelCase_ : Tuple = processor(text=self.input_string , voice_preset=self.voice_preset )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = self.get_tokenizer()
UpperCAmelCase_ : Optional[Any] = BarkProcessor(tokenizer=lowercase_ )
UpperCAmelCase_ : Tuple = processor(text=self.input_string )
UpperCAmelCase_ : Union[str, Any] = tokenizer(
self.input_string , padding="max_length" , max_length=256 , add_special_tokens=lowercase_ , return_attention_mask=lowercase_ , return_token_type_ids=lowercase_ , )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
"""simple docstring"""
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def __a ( __lowerCamelCase, __lowerCamelCase ):
assert isinstance(__lowerCamelCase, __lowerCamelCase )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory", [False, True] )
def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
UpperCAmelCase_ : Union[str, Any] = tmp_path / "cache"
UpperCAmelCase_ : str = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
UpperCAmelCase_ : str = JsonDatasetReader(__lowerCamelCase, cache_dir=__lowerCamelCase, keep_in_memory=__lowerCamelCase ).read()
_check_json_dataset(__lowerCamelCase, __lowerCamelCase )
@pytest.mark.parametrize(
"features", [
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
], )
def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
UpperCAmelCase_ : Optional[Any] = tmp_path / "cache"
UpperCAmelCase_ : Optional[int] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
UpperCAmelCase_ : int = features.copy() if features else default_expected_features
UpperCAmelCase_ : Optional[Any] = (
Features({feature: Value(__lowerCamelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
UpperCAmelCase_ : Any = JsonDatasetReader(__lowerCamelCase, features=__lowerCamelCase, cache_dir=__lowerCamelCase ).read()
_check_json_dataset(__lowerCamelCase, __lowerCamelCase )
@pytest.mark.parametrize(
"features", [
None,
{"col_3": "float64", "col_1": "string", "col_2": "int64"},
], )
def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
UpperCAmelCase_ : List[str] = tmp_path / "cache"
UpperCAmelCase_ : Dict = {"col_3": "float64", "col_1": "string", "col_2": "int64"}
UpperCAmelCase_ : Any = features.copy() if features else default_expected_features
UpperCAmelCase_ : Dict = (
Features({feature: Value(__lowerCamelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
UpperCAmelCase_ : Dict = JsonDatasetReader(__lowerCamelCase, features=__lowerCamelCase, cache_dir=__lowerCamelCase ).read()
assert isinstance(__lowerCamelCase, __lowerCamelCase )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_3", "col_1", "col_2"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
def __a ( __lowerCamelCase, __lowerCamelCase ):
# jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"}
UpperCAmelCase_ : str = {"col_2": "int64", "col_3": "float64", "col_1": "string"}
UpperCAmelCase_ : str = features.copy()
UpperCAmelCase_ : Union[str, Any] = (
Features({feature: Value(__lowerCamelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
UpperCAmelCase_ : Any = tmp_path / "cache"
UpperCAmelCase_ : str = JsonDatasetReader(__lowerCamelCase, features=__lowerCamelCase, cache_dir=__lowerCamelCase ).read()
assert isinstance(__lowerCamelCase, __lowerCamelCase )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_2", "col_3", "col_1"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("split", [None, NamedSplit("train" ), "train", "test"] )
def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
UpperCAmelCase_ : Dict = tmp_path / "cache"
UpperCAmelCase_ : str = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
UpperCAmelCase_ : Dict = JsonDatasetReader(__lowerCamelCase, cache_dir=__lowerCamelCase, split=__lowerCamelCase ).read()
_check_json_dataset(__lowerCamelCase, __lowerCamelCase )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("path_type", [str, list] )
def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
if issubclass(__lowerCamelCase, __lowerCamelCase ):
UpperCAmelCase_ : Dict = jsonl_path
elif issubclass(__lowerCamelCase, __lowerCamelCase ):
UpperCAmelCase_ : List[Any] = [jsonl_path]
UpperCAmelCase_ : str = tmp_path / "cache"
UpperCAmelCase_ : str = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
UpperCAmelCase_ : str = JsonDatasetReader(__lowerCamelCase, cache_dir=__lowerCamelCase ).read()
_check_json_dataset(__lowerCamelCase, __lowerCamelCase )
def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase=("train",) ):
assert isinstance(__lowerCamelCase, __lowerCamelCase )
for split in splits:
UpperCAmelCase_ : List[str] = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory", [False, True] )
def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
UpperCAmelCase_ : Tuple = tmp_path / "cache"
UpperCAmelCase_ : Optional[Any] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
UpperCAmelCase_ : List[str] = JsonDatasetReader({"train": jsonl_path}, cache_dir=__lowerCamelCase, keep_in_memory=__lowerCamelCase ).read()
_check_json_datasetdict(__lowerCamelCase, __lowerCamelCase )
@pytest.mark.parametrize(
"features", [
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
], )
def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
UpperCAmelCase_ : int = tmp_path / "cache"
UpperCAmelCase_ : str = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
UpperCAmelCase_ : Any = features.copy() if features else default_expected_features
UpperCAmelCase_ : Union[str, Any] = (
Features({feature: Value(__lowerCamelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
UpperCAmelCase_ : Optional[Any] = JsonDatasetReader({"train": jsonl_path}, features=__lowerCamelCase, cache_dir=__lowerCamelCase ).read()
_check_json_datasetdict(__lowerCamelCase, __lowerCamelCase )
@pytest.mark.parametrize("split", [None, NamedSplit("train" ), "train", "test"] )
def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
if split:
UpperCAmelCase_ : Dict = {split: jsonl_path}
else:
UpperCAmelCase_ : Any = "train"
UpperCAmelCase_ : Union[str, Any] = {"train": jsonl_path, "test": jsonl_path}
UpperCAmelCase_ : Any = tmp_path / "cache"
UpperCAmelCase_ : str = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
UpperCAmelCase_ : Dict = JsonDatasetReader(__lowerCamelCase, cache_dir=__lowerCamelCase ).read()
_check_json_datasetdict(__lowerCamelCase, __lowerCamelCase, splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def __a ( __lowerCamelCase ):
return json.load(__lowerCamelCase )
def __a ( __lowerCamelCase ):
return [json.loads(__lowerCamelCase ) for line in buffer]
class A_ :
'''simple docstring'''
@pytest.mark.parametrize("lines, load_json_function" , [(True, load_json_lines), (False, load_json)] )
def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ ):
"""simple docstring"""
with io.BytesIO() as buffer:
JsonDatasetWriter(lowercase_ , lowercase_ , lines=lowercase_ ).write()
buffer.seek(0 )
UpperCAmelCase_ : Dict = load_json_function(lowercase_ )
assert isinstance(lowercase_ , lowercase_ )
assert isinstance(exported_content[0] , lowercase_ )
assert len(lowercase_ ) == 10
@pytest.mark.parametrize(
"orient, container, keys, len_at" , [
("records", list, {"tokens", "labels", "answers", "id"}, None),
("split", dict, {"columns", "data"}, "data"),
("index", dict, set("0123456789" ), None),
("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
("values", list, None, None),
("table", dict, {"schema", "data"}, "data"),
] , )
def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ):
"""simple docstring"""
with io.BytesIO() as buffer:
JsonDatasetWriter(lowercase_ , lowercase_ , lines=lowercase_ , orient=lowercase_ ).write()
buffer.seek(0 )
UpperCAmelCase_ : Optional[Any] = load_json(lowercase_ )
assert isinstance(lowercase_ , lowercase_ )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(lowercase_ , "keys" ) and not hasattr(exported_content[0] , "keys" )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(lowercase_ ) == 10
@pytest.mark.parametrize("lines, load_json_function" , [(True, load_json_lines), (False, load_json)] )
def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ ):
"""simple docstring"""
with io.BytesIO() as buffer:
JsonDatasetWriter(lowercase_ , lowercase_ , lines=lowercase_ , num_proc=2 ).write()
buffer.seek(0 )
UpperCAmelCase_ : Optional[Any] = load_json_function(lowercase_ )
assert isinstance(lowercase_ , lowercase_ )
assert isinstance(exported_content[0] , lowercase_ )
assert len(lowercase_ ) == 10
@pytest.mark.parametrize(
"orient, container, keys, len_at" , [
("records", list, {"tokens", "labels", "answers", "id"}, None),
("split", dict, {"columns", "data"}, "data"),
("index", dict, set("0123456789" ), None),
("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
("values", list, None, None),
("table", dict, {"schema", "data"}, "data"),
] , )
def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ):
"""simple docstring"""
with io.BytesIO() as buffer:
JsonDatasetWriter(lowercase_ , lowercase_ , lines=lowercase_ , orient=lowercase_ , num_proc=2 ).write()
buffer.seek(0 )
UpperCAmelCase_ : List[Any] = load_json(lowercase_ )
assert isinstance(lowercase_ , lowercase_ )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(lowercase_ , "keys" ) and not hasattr(exported_content[0] , "keys" )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(lowercase_ ) == 10
def UpperCamelCase__ ( self , lowercase_ ):
"""simple docstring"""
with pytest.raises(lowercase_ ):
with io.BytesIO() as buffer:
JsonDatasetWriter(lowercase_ , lowercase_ , num_proc=0 )
@pytest.mark.parametrize("compression, extension" , [("gzip", "gz"), ("bz2", "bz2"), ("xz", "xz")] )
def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : List[Any] = tmp_path_factory.mktemp("data" ) / F"""test.json.{extension}"""
UpperCAmelCase_ : str = str(shared_datadir / F"""test_file.json.{extension}""" )
JsonDatasetWriter(lowercase_ , lowercase_ , compression=lowercase_ ).write()
with fsspec.open(lowercase_ , "rb" , compression="infer" ) as f:
UpperCAmelCase_ : List[str] = f.read()
with fsspec.open(lowercase_ , "rb" , compression="infer" ) as f:
UpperCAmelCase_ : Dict = f.read()
assert exported_content == original_content
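
# Round-trip sketch (illustration, not part of the test file): writing a small
# Dataset to JSON Lines on disk and reading it back.
#
#   from datasets import Dataset
#   ds = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2]})
#   JsonDatasetWriter(ds, "out.jsonl", lines=True).write()
#   reloaded = JsonDatasetReader("out.jsonl").read()
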
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_a = logging.get_logger(__name__)
def __a ( __lowerCamelCase, __lowerCamelCase=False ):
UpperCAmelCase_ : Optional[int] = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"""blocks.{i}.norm1.weight""", f"""deit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((f"""blocks.{i}.norm1.bias""", f"""deit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((f"""blocks.{i}.attn.proj.weight""", f"""deit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.attn.proj.bias""", f"""deit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((f"""blocks.{i}.norm2.weight""", f"""deit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((f"""blocks.{i}.norm2.bias""", f"""deit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.weight""", f"""deit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.bias""", f"""deit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.weight""", f"""deit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.bias""", f"""deit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
("cls_token", "deit.embeddings.cls_token"),
("dist_token", "deit.embeddings.distillation_token"),
("patch_embed.proj.weight", "deit.embeddings.patch_embeddings.projection.weight"),
("patch_embed.proj.bias", "deit.embeddings.patch_embeddings.projection.bias"),
("pos_embed", "deit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
("pre_logits.fc.weight", "pooler.dense.weight"),
("pre_logits.fc.bias", "pooler.dense.bias"),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
UpperCAmelCase_ : Dict = [(pair[0], pair[1][4:]) if pair[1].startswith("deit" ) else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
("norm.weight", "deit.layernorm.weight"),
("norm.bias", "deit.layernorm.bias"),
("head.weight", "cls_classifier.weight"),
("head.bias", "cls_classifier.bias"),
("head_dist.weight", "distillation_classifier.weight"),
("head_dist.bias", "distillation_classifier.bias"),
] )
return rename_keys
def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase=False ):
for i in range(config.num_hidden_layers ):
if base_model:
UpperCAmelCase_ : int = ""
else:
UpperCAmelCase_ : Union[str, Any] = "deit."
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
UpperCAmelCase_ : Tuple = state_dict.pop(f"""blocks.{i}.attn.qkv.weight""" )
UpperCAmelCase_ : Dict = state_dict.pop(f"""blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
UpperCAmelCase_ : Union[str, Any] = in_proj_weight[
: config.hidden_size, :
]
UpperCAmelCase_ : Any = in_proj_bias[: config.hidden_size]
UpperCAmelCase_ : Optional[Any] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
UpperCAmelCase_ : Dict = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
UpperCAmelCase_ : List[Any] = in_proj_weight[
-config.hidden_size :, :
]
UpperCAmelCase_ : str = in_proj_bias[-config.hidden_size :]
def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
UpperCAmelCase_ : Tuple = dct.pop(__lowerCamelCase )
UpperCAmelCase_ : Tuple = val
def __a ( ):
UpperCAmelCase_ : Union[str, Any] = "http://images.cocodataset.org/val2017/000000039769.jpg"
UpperCAmelCase_ : str = Image.open(requests.get(__lowerCamelCase, stream=__lowerCamelCase ).raw )
return im
@torch.no_grad()
def __a ( __lowerCamelCase, __lowerCamelCase ):
UpperCAmelCase_ : List[str] = DeiTConfig()
# all deit models have fine-tuned heads
UpperCAmelCase_ : Optional[int] = False
# dataset (fine-tuned on ImageNet 2012), patch_size and image_size
UpperCAmelCase_ : Tuple = 1000
UpperCAmelCase_ : str = "huggingface/label-files"
UpperCAmelCase_ : str = "imagenet-1k-id2label.json"
UpperCAmelCase_ : List[Any] = json.load(open(hf_hub_download(__lowerCamelCase, __lowerCamelCase, repo_type="dataset" ), "r" ) )
UpperCAmelCase_ : List[str] = {int(__lowerCamelCase ): v for k, v in idalabel.items()}
UpperCAmelCase_ : Any = idalabel
UpperCAmelCase_ : int = {v: k for k, v in idalabel.items()}
UpperCAmelCase_ : Any = int(deit_name[-6:-4] )
UpperCAmelCase_ : Dict = int(deit_name[-3:] )
# size of the architecture
if deit_name[9:].startswith("tiny" ):
UpperCAmelCase_ : Any = 192
UpperCAmelCase_ : Union[str, Any] = 768
UpperCAmelCase_ : Union[str, Any] = 12
UpperCAmelCase_ : int = 3
elif deit_name[9:].startswith("small" ):
UpperCAmelCase_ : List[str] = 384
UpperCAmelCase_ : List[str] = 1536
UpperCAmelCase_ : Dict = 12
UpperCAmelCase_ : Any = 6
if deit_name[9:].startswith("base" ):
pass
elif deit_name[4:].startswith("large" ):
UpperCAmelCase_ : int = 1024
UpperCAmelCase_ : List[Any] = 4096
UpperCAmelCase_ : Optional[int] = 24
UpperCAmelCase_ : int = 16
# load original model from timm
UpperCAmelCase_ : Union[str, Any] = timm.create_model(__lowerCamelCase, pretrained=__lowerCamelCase )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
UpperCAmelCase_ : Optional[Any] = timm_model.state_dict()
UpperCAmelCase_ : Tuple = create_rename_keys(__lowerCamelCase, __lowerCamelCase )
for src, dest in rename_keys:
rename_key(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase )
read_in_q_k_v(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase )
# load HuggingFace model
UpperCAmelCase_ : str = DeiTForImageClassificationWithTeacher(__lowerCamelCase ).eval()
model.load_state_dict(__lowerCamelCase )
# Check outputs on an image, prepared by DeiTImageProcessor
UpperCAmelCase_ : Union[str, Any] = int(
(256 / 224) * config.image_size ) # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
UpperCAmelCase_ : Optional[Any] = DeiTImageProcessor(size=__lowerCamelCase, crop_size=config.image_size )
UpperCAmelCase_ : Any = image_processor(images=prepare_img(), return_tensors="pt" )
UpperCAmelCase_ : int = encoding["pixel_values"]
UpperCAmelCase_ : Optional[Any] = model(__lowerCamelCase )
UpperCAmelCase_ : Any = timm_model(__lowerCamelCase )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(__lowerCamelCase, outputs.logits, atol=1E-3 )
Path(__lowerCamelCase ).mkdir(exist_ok=__lowerCamelCase )
print(f"""Saving model {deit_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(__lowerCamelCase )
print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(__lowerCamelCase )
if __name__ == "__main__":
_a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--deit_name',
default='vit_deit_base_distilled_patch16_224',
type=str,
help='Name of the DeiT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
_a = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
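
# Example invocation (sketch; the script filename and output path are
# hypothetical):
#
#   python convert_deit_timm_to_pytorch.py \
#       --deit_name vit_deit_base_distilled_patch16_224 \
#       --pytorch_dump_folder_path ./deit-base-distilled-patch16-224
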
"""simple docstring"""
import unittest
from diffusers.models.unet_ad_blocks import * # noqa F403
from diffusers.utils import torch_device
from .test_unet_blocks_common import UNetBlockTesterMixin
class A_ (lowercase__ ,unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = DownBlockaD # noqa F405
SCREAMING_SNAKE_CASE__ : List[Any] = """down"""
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = [-0.02_32, -0.98_69, 0.80_54, -0.06_37, -0.16_88, -1.42_64, 0.44_70, -1.33_94, 0.09_04]
super().test_output(lowercase_ )
class A_ (lowercase__ ,unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Any = ResnetDownsampleBlockaD # noqa F405
SCREAMING_SNAKE_CASE__ : int = """down"""
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = [0.07_10, 0.24_10, -0.73_20, -1.07_57, -1.13_43, 0.35_40, -0.01_33, -0.25_76, 0.09_48]
super().test_output(lowercase_ )
class A_ (lowercase__ ,unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = AttnDownBlockaD # noqa F405
SCREAMING_SNAKE_CASE__ : List[str] = """down"""
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = [0.06_36, 0.89_64, -0.62_34, -1.01_31, 0.08_44, 0.49_35, 0.34_37, 0.09_11, -0.29_57]
super().test_output(lowercase_ )
class A_ (lowercase__ ,unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = CrossAttnDownBlockaD # noqa F405
SCREAMING_SNAKE_CASE__ : List[str] = """down"""
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Dict = super().prepare_init_args_and_inputs_for_common()
UpperCAmelCase_ : List[Any] = 32
return init_dict, inputs_dict
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Dict = [0.22_38, -0.73_96, -0.22_55, -0.38_29, 0.19_25, 1.16_65, 0.06_03, -0.72_95, 0.19_83]
super().test_output(lowercase_ )
class A_ (lowercase__ ,unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[int] = SimpleCrossAttnDownBlockaD # noqa F405
SCREAMING_SNAKE_CASE__ : Dict = """down"""
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return super().get_dummy_input(include_encoder_hidden_states=lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Any = super().prepare_init_args_and_inputs_for_common()
UpperCAmelCase_ : Dict = 32
return init_dict, inputs_dict
@unittest.skipIf(torch_device == "mps" , "MPS result is not consistent" )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = [0.79_21, -0.09_92, -0.19_62, -0.76_95, -0.42_42, 0.78_04, 0.47_37, 0.27_65, 0.33_38]
super().test_output(lowercase_ )
class A_ (lowercase__ ,unittest.TestCase ):
'''simple docstring'''
    SCREAMING_SNAKE_CASE__ : Any = SkipDownBlock2D # noqa F405
SCREAMING_SNAKE_CASE__ : Optional[Any] = """down"""
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return super().get_dummy_input(include_skip_sample=lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : int = [-0.08_45, -0.20_87, -0.24_65, 0.09_71, 0.19_00, -0.04_84, 0.26_64, 0.41_79, 0.50_69]
super().test_output(lowercase_ )
class A_ (lowercase__ ,unittest.TestCase ):
'''simple docstring'''
    SCREAMING_SNAKE_CASE__ : Union[str, Any] = AttnSkipDownBlock2D # noqa F405
SCREAMING_SNAKE_CASE__ : int = """down"""
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return super().get_dummy_input(include_skip_sample=lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = [0.55_39, 0.16_09, 0.49_24, 0.05_37, -0.19_95, 0.40_50, 0.09_79, -0.27_21, -0.06_42]
super().test_output(lowercase_ )
class A_ (lowercase__ ,unittest.TestCase ):
'''simple docstring'''
    SCREAMING_SNAKE_CASE__ : Dict = DownEncoderBlock2D # noqa F405
SCREAMING_SNAKE_CASE__ : Any = """down"""
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return super().get_dummy_input(include_temb=lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : str = {
"in_channels": 32,
"out_channels": 32,
}
UpperCAmelCase_ : Tuple = self.dummy_input
return init_dict, inputs_dict
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : List[str] = [1.11_02, 0.53_02, 0.48_72, -0.00_23, -0.80_42, 0.04_83, -0.34_89, -0.56_32, 0.76_26]
super().test_output(lowercase_ )
class A_ (lowercase__ ,unittest.TestCase ):
'''simple docstring'''
    SCREAMING_SNAKE_CASE__ : Optional[int] = AttnDownEncoderBlock2D # noqa F405
SCREAMING_SNAKE_CASE__ : Tuple = """down"""
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return super().get_dummy_input(include_temb=lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : List[str] = {
"in_channels": 32,
"out_channels": 32,
}
UpperCAmelCase_ : Union[str, Any] = self.dummy_input
return init_dict, inputs_dict
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Dict = [0.89_66, -0.14_86, 0.85_68, 0.81_41, -0.90_46, -0.13_42, -0.09_72, -0.74_17, 0.15_38]
super().test_output(lowercase_ )
class A_ (lowercase__ ,unittest.TestCase ):
'''simple docstring'''
    SCREAMING_SNAKE_CASE__ : Union[str, Any] = UNetMidBlock2D # noqa F405
SCREAMING_SNAKE_CASE__ : str = """mid"""
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Dict = {
"in_channels": 32,
"temb_channels": 128,
}
UpperCAmelCase_ : Optional[int] = self.dummy_input
return init_dict, inputs_dict
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = [-0.10_62, 1.72_48, 0.34_94, 1.45_69, -0.09_10, -1.24_21, -0.99_84, 0.67_36, 1.00_28]
super().test_output(lowercase_ )
class A_ (lowercase__ ,unittest.TestCase ):
'''simple docstring'''
    SCREAMING_SNAKE_CASE__ : List[str] = UNetMidBlock2DCrossAttn # noqa F405
SCREAMING_SNAKE_CASE__ : Dict = """mid"""
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = super().prepare_init_args_and_inputs_for_common()
UpperCAmelCase_ : Optional[Any] = 32
return init_dict, inputs_dict
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Any = [0.01_87, 2.42_20, 0.44_84, 1.12_03, -0.61_21, -1.51_22, -0.82_70, 0.78_51, 1.83_35]
super().test_output(lowercase_ )
class A_ (lowercase__ ,unittest.TestCase ):
'''simple docstring'''
    SCREAMING_SNAKE_CASE__ : Optional[int] = UNetMidBlock2DSimpleCrossAttn # noqa F405
SCREAMING_SNAKE_CASE__ : Union[str, Any] = """mid"""
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return super().get_dummy_input(include_encoder_hidden_states=lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : List[str] = super().prepare_init_args_and_inputs_for_common()
UpperCAmelCase_ : Any = 32
return init_dict, inputs_dict
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Dict = [0.71_43, 1.99_74, 0.54_48, 1.39_77, 0.12_82, -1.12_37, -1.42_38, 0.55_30, 0.88_80]
super().test_output(lowercase_ )
class A_ (lowercase__ ,unittest.TestCase ):
'''simple docstring'''
    SCREAMING_SNAKE_CASE__ : Union[str, Any] = UpBlock2D # noqa F405
SCREAMING_SNAKE_CASE__ : Optional[int] = """up"""
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return super().get_dummy_input(include_res_hidden_states_tuple=lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Tuple = [-0.20_41, -0.41_65, -0.30_22, 0.00_41, -0.66_28, -0.70_53, 0.19_28, -0.03_25, 0.05_23]
super().test_output(lowercase_ )
class A_ (lowercase__ ,unittest.TestCase ):
'''simple docstring'''
    SCREAMING_SNAKE_CASE__ : Optional[Any] = ResnetUpsampleBlock2D # noqa F405
SCREAMING_SNAKE_CASE__ : List[str] = """up"""
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return super().get_dummy_input(include_res_hidden_states_tuple=lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : List[str] = [0.22_87, 0.35_49, -0.13_46, 0.47_97, -0.17_15, -0.96_49, 0.73_05, -0.58_64, -0.62_44]
super().test_output(lowercase_ )
class A_ (lowercase__ ,unittest.TestCase ):
'''simple docstring'''
    SCREAMING_SNAKE_CASE__ : Optional[int] = CrossAttnUpBlock2D # noqa F405
SCREAMING_SNAKE_CASE__ : Tuple = """up"""
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return super().get_dummy_input(include_res_hidden_states_tuple=lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : List[str] = super().prepare_init_args_and_inputs_for_common()
UpperCAmelCase_ : int = 32
return init_dict, inputs_dict
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = [-0.14_03, -0.35_15, -0.04_20, -0.14_25, 0.31_67, 0.50_94, -0.21_81, 0.59_31, 0.55_82]
super().test_output(lowercase_ )
class A_ (lowercase__ ,unittest.TestCase ):
'''simple docstring'''
    SCREAMING_SNAKE_CASE__ : int = SimpleCrossAttnUpBlock2D # noqa F405
SCREAMING_SNAKE_CASE__ : Optional[int] = """up"""
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return super().get_dummy_input(include_res_hidden_states_tuple=lowercase_ , include_encoder_hidden_states=lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = super().prepare_init_args_and_inputs_for_common()
UpperCAmelCase_ : Optional[int] = 32
return init_dict, inputs_dict
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = [0.26_45, 0.14_80, 0.09_09, 0.80_44, -0.97_58, -0.90_83, 0.09_94, -1.14_53, -0.74_02]
super().test_output(lowercase_ )
class A_ (lowercase__ ,unittest.TestCase ):
'''simple docstring'''
    SCREAMING_SNAKE_CASE__ : List[Any] = AttnUpBlock2D # noqa F405
SCREAMING_SNAKE_CASE__ : Dict = """up"""
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return super().get_dummy_input(include_res_hidden_states_tuple=lowercase_ )
@unittest.skipIf(torch_device == "mps" , "MPS result is not consistent" )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = [0.09_79, 0.13_26, 0.00_21, 0.06_59, 0.22_49, 0.00_59, 0.11_32, 0.59_52, 0.10_33]
super().test_output(lowercase_ )
class A_ (lowercase__ ,unittest.TestCase ):
'''simple docstring'''
    SCREAMING_SNAKE_CASE__ : Optional[int] = SkipUpBlock2D # noqa F405
SCREAMING_SNAKE_CASE__ : str = """up"""
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return super().get_dummy_input(include_res_hidden_states_tuple=lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : List[str] = [-0.08_93, -0.12_34, -0.15_06, -0.03_32, 0.01_23, -0.02_11, 0.05_66, 0.01_43, 0.03_62]
super().test_output(lowercase_ )
class A_ (lowercase__ ,unittest.TestCase ):
'''simple docstring'''
    SCREAMING_SNAKE_CASE__ : Dict = AttnSkipUpBlock2D # noqa F405
SCREAMING_SNAKE_CASE__ : Optional[Any] = """up"""
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return super().get_dummy_input(include_res_hidden_states_tuple=lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : List[Any] = [0.03_61, 0.06_17, 0.27_87, -0.03_50, 0.03_42, 0.34_21, -0.08_43, 0.09_13, 0.30_15]
super().test_output(lowercase_ )
class A_ (lowercase__ ,unittest.TestCase ):
'''simple docstring'''
    SCREAMING_SNAKE_CASE__ : Any = UpDecoderBlock2D # noqa F405
SCREAMING_SNAKE_CASE__ : Tuple = """up"""
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return super().get_dummy_input(include_temb=lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Any = {"in_channels": 32, "out_channels": 32}
UpperCAmelCase_ : List[Any] = self.dummy_input
return init_dict, inputs_dict
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Tuple = [0.44_04, 0.19_98, -0.98_86, -0.33_20, -0.31_28, -0.70_34, -0.69_55, -0.23_38, -0.31_37]
super().test_output(lowercase_ )
class A_ (lowercase__ ,unittest.TestCase ):
'''simple docstring'''
    SCREAMING_SNAKE_CASE__ : int = AttnUpDecoderBlock2D # noqa F405
SCREAMING_SNAKE_CASE__ : str = """up"""
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return super().get_dummy_input(include_temb=lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : int = {"in_channels": 32, "out_channels": 32}
UpperCAmelCase_ : Optional[Any] = self.dummy_input
return init_dict, inputs_dict
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = [0.67_38, 0.44_91, 0.10_55, 1.07_10, 0.73_16, 0.33_39, 0.33_52, 0.10_23, 0.35_68]
super().test_output(lowercase_ )
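# Illustrative sketch (not part of the test suite): exercising one block directly.
# The sizes are assumptions matching the init dicts above (32 channels, 128-dim
# time embedding); a real UNet wires many such blocks together.
if __name__ == "__main__":
    import torch

    block = DownBlock2D(in_channels=32, out_channels=32, temb_channels=128)  # noqa: F405
    sample, temb = torch.randn(4, 32, 32, 32), torch.randn(4, 128)
    hidden_states, output_states = block(sample, temb)
    print(hidden_states.shape)  # spatial size halved by the block's downsampler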
| 365
|
"""simple docstring"""
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class A_ (unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase__ ( self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
# pipeline has Flax weights
UpperCAmelCase_ : Optional[Any] = FlaxDiffusionPipeline.from_pretrained(
"hf-internal-testing/tiny-stable-diffusion-pipe" , safety_checker=lowercase_ , cache_dir=lowercase_ )
UpperCAmelCase_ : List[Any] = [t[-1] for t in os.walk(os.path.join(lowercase_ , os.listdir(lowercase_ )[0] , "snapshots" ) )]
UpperCAmelCase_ : Dict = [item for sublist in all_root_files for item in sublist]
# None of the downloaded files should be a PyTorch file even if we have some here:
# https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
assert not any(f.endswith(".bin" ) for f in files )
@slow
@require_flax
class A_ (unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = FlaxStableDiffusionPipeline.from_pretrained(
"hf-internal-testing/tiny-stable-diffusion-pipe" , safety_checker=lowercase_ )
UpperCAmelCase_ : Tuple = (
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
UpperCAmelCase_ : List[Any] = jax.random.PRNGKey(0 )
UpperCAmelCase_ : List[str] = 4
UpperCAmelCase_ : Tuple = jax.device_count()
UpperCAmelCase_ : Optional[int] = num_samples * [prompt]
UpperCAmelCase_ : List[Any] = pipeline.prepare_inputs(lowercase_ )
# shard inputs and rng
UpperCAmelCase_ : int = replicate(lowercase_ )
UpperCAmelCase_ : str = jax.random.split(lowercase_ , lowercase_ )
UpperCAmelCase_ : List[str] = shard(lowercase_ )
UpperCAmelCase_ : Dict = pipeline(lowercase_ , lowercase_ , lowercase_ , lowercase_ , jit=lowercase_ ).images
assert images.shape == (num_samples, 1, 64, 64, 3)
if jax.device_count() == 8:
            assert np.abs(np.abs(images[0, 0, :2, :2, -2:] , dtype=np.float32 ).sum() - 4.1_51_47_45 ) < 1E-3
            assert np.abs(np.abs(lowercase_ , dtype=np.float32 ).sum() - 4_99_47.8_75 ) < 5E-1
UpperCAmelCase_ : List[Any] = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) )
assert len(lowercase_ ) == num_samples
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = FlaxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4" , revision="flax" , safety_checker=lowercase_ )
UpperCAmelCase_ : Optional[int] = (
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
UpperCAmelCase_ : str = jax.random.PRNGKey(0 )
UpperCAmelCase_ : Union[str, Any] = 50
UpperCAmelCase_ : List[str] = jax.device_count()
UpperCAmelCase_ : List[str] = num_samples * [prompt]
UpperCAmelCase_ : Union[str, Any] = pipeline.prepare_inputs(lowercase_ )
# shard inputs and rng
UpperCAmelCase_ : Any = replicate(lowercase_ )
UpperCAmelCase_ : List[str] = jax.random.split(lowercase_ , lowercase_ )
UpperCAmelCase_ : List[str] = shard(lowercase_ )
UpperCAmelCase_ : int = pipeline(lowercase_ , lowercase_ , lowercase_ , lowercase_ , jit=lowercase_ ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.float32 ).sum() - 0.05_65_24_01) ) < 1E-3
            assert np.abs((np.abs(lowercase_ , dtype=np.float32 ).sum() - 2_38_38_08.2) ) < 5E-1
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ : int = FlaxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloataa , safety_checker=lowercase_ )
UpperCAmelCase_ : Any = (
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
UpperCAmelCase_ : str = jax.random.PRNGKey(0 )
UpperCAmelCase_ : str = 50
UpperCAmelCase_ : List[str] = jax.device_count()
UpperCAmelCase_ : List[Any] = num_samples * [prompt]
UpperCAmelCase_ : Any = pipeline.prepare_inputs(lowercase_ )
# shard inputs and rng
UpperCAmelCase_ : Dict = replicate(lowercase_ )
UpperCAmelCase_ : str = jax.random.split(lowercase_ , lowercase_ )
UpperCAmelCase_ : Union[str, Any] = shard(lowercase_ )
UpperCAmelCase_ : List[Any] = pipeline(lowercase_ , lowercase_ , lowercase_ , lowercase_ , jit=lowercase_ ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.float32 ).sum() - 0.04_00_39_06) ) < 1E-3
            assert np.abs((np.abs(lowercase_ , dtype=np.float32 ).sum() - 2_37_35_16.75) ) < 5E-1
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ : str = FlaxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloataa )
UpperCAmelCase_ : List[Any] = (
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
UpperCAmelCase_ : Dict = jax.random.PRNGKey(0 )
UpperCAmelCase_ : Optional[int] = 50
UpperCAmelCase_ : Optional[int] = jax.device_count()
UpperCAmelCase_ : str = num_samples * [prompt]
UpperCAmelCase_ : int = pipeline.prepare_inputs(lowercase_ )
# shard inputs and rng
UpperCAmelCase_ : Union[str, Any] = replicate(lowercase_ )
UpperCAmelCase_ : Union[str, Any] = jax.random.split(lowercase_ , lowercase_ )
UpperCAmelCase_ : Optional[int] = shard(lowercase_ )
UpperCAmelCase_ : Any = pipeline(lowercase_ , lowercase_ , lowercase_ , lowercase_ , jit=lowercase_ ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.float32 ).sum() - 0.04_00_39_06) ) < 1E-3
            assert np.abs((np.abs(lowercase_ , dtype=np.float32 ).sum() - 2_37_35_16.75) ) < 5E-1
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : List[Any] = FlaxDDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="scaled_linear" , set_alpha_to_one=lowercase_ , steps_offset=1 , )
UpperCAmelCase_ , UpperCAmelCase_ : int = FlaxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloataa , scheduler=lowercase_ , safety_checker=lowercase_ , )
UpperCAmelCase_ : List[Any] = scheduler.create_state()
UpperCAmelCase_ : int = scheduler_state
UpperCAmelCase_ : Union[str, Any] = (
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
UpperCAmelCase_ : Optional[Any] = jax.random.PRNGKey(0 )
UpperCAmelCase_ : int = 50
UpperCAmelCase_ : str = jax.device_count()
UpperCAmelCase_ : List[Any] = num_samples * [prompt]
UpperCAmelCase_ : int = pipeline.prepare_inputs(lowercase_ )
# shard inputs and rng
UpperCAmelCase_ : int = replicate(lowercase_ )
UpperCAmelCase_ : List[str] = jax.random.split(lowercase_ , lowercase_ )
UpperCAmelCase_ : Optional[Any] = shard(lowercase_ )
UpperCAmelCase_ : Any = pipeline(lowercase_ , lowercase_ , lowercase_ , lowercase_ , jit=lowercase_ ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.float32 ).sum() - 0.0_45_04_39_45) ) < 1E-3
            assert np.abs((np.abs(lowercase_ , dtype=np.float32 ).sum() - 2_34_76_93.5) ) < 5E-1
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : int = (
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
UpperCAmelCase_ : List[str] = jax.device_count()
UpperCAmelCase_ : List[Any] = num_samples * [prompt]
UpperCAmelCase_ : Union[str, Any] = jax.random.split(jax.random.PRNGKey(0 ) , lowercase_ )
UpperCAmelCase_ , UpperCAmelCase_ : str = FlaxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloataa , safety_checker=lowercase_ , )
UpperCAmelCase_ : Any = replicate(lowercase_ )
UpperCAmelCase_ : List[str] = pipeline.prepare_inputs(lowercase_ )
UpperCAmelCase_ : List[str] = shard(lowercase_ )
UpperCAmelCase_ : List[Any] = pipeline(lowercase_ , lowercase_ , lowercase_ , jit=lowercase_ ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
UpperCAmelCase_ : int = images[2, 0, 256, 10:17, 1]
# With memory efficient attention
UpperCAmelCase_ , UpperCAmelCase_ : int = FlaxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloataa , safety_checker=lowercase_ , use_memory_efficient_attention=lowercase_ , )
UpperCAmelCase_ : str = replicate(lowercase_ )
UpperCAmelCase_ : str = pipeline.prepare_inputs(lowercase_ )
UpperCAmelCase_ : Optional[int] = shard(lowercase_ )
UpperCAmelCase_ : str = pipeline(lowercase_ , lowercase_ , lowercase_ , jit=lowercase_ ).images
assert images_eff.shape == (num_samples, 1, 512, 512, 3)
        UpperCAmelCase_ : Optional[int] = images_eff[2, 0, 256, 10:17, 1]
# I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
# over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
assert abs(slice_eff - slice ).max() < 1E-2
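# Minimal sketch of the data-parallel pattern the tests above repeat: replicate
# the params across devices, shard prompt ids and RNG keys, then run the jitted
# pipeline call. The tiny test checkpoint is reused to keep the sketch cheap;
# the helper name is ours, not part of the pipeline API.
def _run_sharded_inference(prompt, num_inference_steps=4):
    pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
        "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None
    )
    num_samples = jax.device_count()
    prng_seed = jax.random.split(jax.random.PRNGKey(0), num_samples)
    prompt_ids = shard(pipeline.prepare_inputs(num_samples * [prompt]))
    params = replicate(params)
    return pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images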
| 23
| 0
|
"""simple docstring"""
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase = None ):
if version.parse(hfh.__version__ ).release < version.parse("0.11.0" ).release:
# old versions of hfh don't url-encode the file path
UpperCAmelCase_ : str = quote(__lowerCamelCase )
return hfh.hf_hub_url(__lowerCamelCase, __lowerCamelCase, repo_type="dataset", revision=__lowerCamelCase )
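# Hypothetical usage (repo id and file path are illustrative only; upstream the
# helper keeps the name hf_hub_url):
#   url = hf_hub_url("squad", "plain_text/train-00000-of-00001.parquet")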
| 366
|
"""simple docstring"""
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
_a = 0
_a = [
[0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0], # 0s are free cells whereas 1s are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
_a = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
_a = tuple[int, int]
class A_ :
'''simple docstring'''
def __init__( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ):
"""simple docstring"""
UpperCAmelCase_ : int = pos_x
UpperCAmelCase_ : List[Any] = pos_y
UpperCAmelCase_ : Union[str, Any] = (pos_y, pos_x)
UpperCAmelCase_ : Any = goal_x
UpperCAmelCase_ : Dict = goal_y
UpperCAmelCase_ : Any = g_cost
UpperCAmelCase_ : List[str] = parent
UpperCAmelCase_ : int = self.calculate_heuristic()
UpperCAmelCase_ : Any = self.g_cost + self.h_cost
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Tuple = self.pos_x - self.goal_x
UpperCAmelCase_ : Union[str, Any] = self.pos_y - self.goal_y
if HEURISTIC == 1:
return abs(lowercase_ ) + abs(lowercase_ )
else:
return sqrt(dy**2 + dx**2 )
def __lt__( self , lowercase_ ):
"""simple docstring"""
return self.f_cost < other.f_cost
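# Worked example of the two heuristics for a node at (y, x) = (0, 0) with goal (6, 6):
#   Manhattan (HEURISTIC == 1): |0 - 6| + |0 - 6| = 12
#   Euclidean (HEURISTIC == 0): sqrt(6**2 + 6**2) ~= 8.49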
class A_ :
'''simple docstring'''
def __init__( self , lowercase_ , lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : Tuple = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , lowercase_ )
UpperCAmelCase_ : List[Any] = Node(goal[1] , goal[0] , goal[1] , goal[0] , 9_9999 , lowercase_ )
UpperCAmelCase_ : str = [self.start]
UpperCAmelCase_ : list[Node] = []
UpperCAmelCase_ : int = False
def UpperCamelCase__ ( self ):
"""simple docstring"""
while self.open_nodes:
# Open Nodes are sorted using __lt__
self.open_nodes.sort()
UpperCAmelCase_ : List[str] = self.open_nodes.pop(0 )
if current_node.pos == self.target.pos:
return self.retrace_path(lowercase_ )
self.closed_nodes.append(lowercase_ )
UpperCAmelCase_ : str = self.get_successors(lowercase_ )
for child_node in successors:
if child_node in self.closed_nodes:
continue
if child_node not in self.open_nodes:
self.open_nodes.append(lowercase_ )
else:
# retrieve the best current path
UpperCAmelCase_ : Union[str, Any] = self.open_nodes.pop(self.open_nodes.index(lowercase_ ) )
if child_node.g_cost < better_node.g_cost:
self.open_nodes.append(lowercase_ )
else:
self.open_nodes.append(lowercase_ )
return [self.start.pos]
def UpperCamelCase__ ( self , lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : Any = []
for action in delta:
UpperCAmelCase_ : str = parent.pos_x + action[1]
UpperCAmelCase_ : int = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(lowercase_ ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(
lowercase_ , lowercase_ , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , lowercase_ , ) )
return successors
def UpperCamelCase__ ( self , lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = node
UpperCAmelCase_ : int = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
UpperCAmelCase_ : Optional[int] = current_node.parent
path.reverse()
return path
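# Note: the open list is a plain Python list re-sorted on every iteration; a
# heap keeps the same ordering contract (Node.__lt__ compares f_cost) at lower
# cost. Hypothetical alternative, not used by the classes here:
#   import heapq
#   heapq.heappush(open_heap, child_node)
#   current_node = heapq.heappop(open_heap)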
class A_ :
'''simple docstring'''
def __init__( self , lowercase_ , lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : Dict = AStar(lowercase_ , lowercase_ )
UpperCAmelCase_ : Optional[Any] = AStar(lowercase_ , lowercase_ )
UpperCAmelCase_ : Tuple = False
def UpperCamelCase__ ( self ):
"""simple docstring"""
while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
self.fwd_astar.open_nodes.sort()
self.bwd_astar.open_nodes.sort()
UpperCAmelCase_ : List[str] = self.fwd_astar.open_nodes.pop(0 )
UpperCAmelCase_ : List[Any] = self.bwd_astar.open_nodes.pop(0 )
if current_bwd_node.pos == current_fwd_node.pos:
return self.retrace_bidirectional_path(
lowercase_ , lowercase_ )
self.fwd_astar.closed_nodes.append(lowercase_ )
self.bwd_astar.closed_nodes.append(lowercase_ )
UpperCAmelCase_ : Tuple = current_bwd_node
UpperCAmelCase_ : str = current_fwd_node
UpperCAmelCase_ : Dict = {
self.fwd_astar: self.fwd_astar.get_successors(lowercase_ ),
self.bwd_astar: self.bwd_astar.get_successors(lowercase_ ),
}
for astar in [self.fwd_astar, self.bwd_astar]:
for child_node in successors[astar]:
if child_node in astar.closed_nodes:
continue
if child_node not in astar.open_nodes:
astar.open_nodes.append(lowercase_ )
else:
# retrieve the best current path
UpperCAmelCase_ : List[Any] = astar.open_nodes.pop(
astar.open_nodes.index(lowercase_ ) )
if child_node.g_cost < better_node.g_cost:
astar.open_nodes.append(lowercase_ )
else:
astar.open_nodes.append(lowercase_ )
return [self.fwd_astar.start.pos]
def UpperCamelCase__ ( self , lowercase_ , lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : List[Any] = self.fwd_astar.retrace_path(lowercase_ )
UpperCAmelCase_ : int = self.bwd_astar.retrace_path(lowercase_ )
bwd_path.pop()
bwd_path.reverse()
UpperCAmelCase_ : Any = fwd_path + bwd_path
return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
_a = (0, 0)
_a = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
_a = time.time()
_a = AStar(init, goal)
_a = a_star.search()
_a = time.time() - start_time
print(f"""AStar execution time = {end_time:f} seconds""")
_a = time.time()
_a = BidirectionalAStar(init, goal)
_a = time.time() - bd_start_time
print(f"""BidirectionalAStar execution time = {bd_end_time:f} seconds""")
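    # Note: this second timing block measures only BidirectionalAStar
    # construction; to time the actual bidirectional search, call .search() on
    # the instance before taking the second timestamp.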
| 23
| 0
|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
_a = logging.get_logger(__name__)
class A_ (lowercase__ ):
'''simple docstring'''
def __init__( self , *lowercase_ , **lowercase_ ):
"""simple docstring"""
warnings.warn(
"The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
" Please use SegformerImageProcessor instead." , lowercase_ , )
super().__init__(*lowercase_ , **lowercase_ )
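# Migration sketch (the checkpoint id is illustrative): the replacement class is
# a drop-in for this deprecated one.
#   from transformers import SegformerImageProcessor
#   image_processor = SegformerImageProcessor.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512")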
| 367
|
"""simple docstring"""
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class A_ (lowercase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = (PNDMScheduler,)
SCREAMING_SNAKE_CASE__ : str = (("""num_inference_steps""", 50),)
def UpperCamelCase__ ( self , **lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : int = {
"num_train_timesteps": 1000,
"beta_start": 0.00_01,
"beta_end": 0.02,
"beta_schedule": "linear",
}
config.update(**lowercase_ )
return config
def UpperCamelCase__ ( self , lowercase_=0 , **lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : str = dict(self.forward_default_kwargs )
UpperCAmelCase_ : List[str] = kwargs.pop("num_inference_steps" , lowercase_ )
UpperCAmelCase_ : Union[str, Any] = self.dummy_sample
UpperCAmelCase_ : Dict = 0.1 * sample
UpperCAmelCase_ : Dict = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
UpperCAmelCase_ : List[Any] = self.get_scheduler_config(**lowercase_ )
UpperCAmelCase_ : Dict = scheduler_class(**lowercase_ )
scheduler.set_timesteps(lowercase_ )
# copy over dummy past residuals
UpperCAmelCase_ : List[Any] = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowercase_ )
UpperCAmelCase_ : Optional[int] = scheduler_class.from_pretrained(lowercase_ )
new_scheduler.set_timesteps(lowercase_ )
# copy over dummy past residuals
UpperCAmelCase_ : int = dummy_past_residuals[:]
UpperCAmelCase_ : List[str] = scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
UpperCAmelCase_ : str = new_scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
UpperCAmelCase_ : Optional[int] = scheduler.step_plms(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
UpperCAmelCase_ : Dict = new_scheduler.step_plms(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def UpperCamelCase__ ( self ):
"""simple docstring"""
pass
def UpperCamelCase__ ( self , lowercase_=0 , **lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : List[Any] = dict(self.forward_default_kwargs )
UpperCAmelCase_ : str = kwargs.pop("num_inference_steps" , lowercase_ )
UpperCAmelCase_ : Optional[int] = self.dummy_sample
UpperCAmelCase_ : List[str] = 0.1 * sample
UpperCAmelCase_ : Tuple = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
UpperCAmelCase_ : str = self.get_scheduler_config()
UpperCAmelCase_ : Dict = scheduler_class(**lowercase_ )
scheduler.set_timesteps(lowercase_ )
# copy over dummy past residuals (must be after setting timesteps)
UpperCAmelCase_ : List[Any] = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowercase_ )
UpperCAmelCase_ : Dict = scheduler_class.from_pretrained(lowercase_ )
# copy over dummy past residuals
new_scheduler.set_timesteps(lowercase_ )
# copy over dummy past residual (must be after setting timesteps)
UpperCAmelCase_ : Optional[Any] = dummy_past_residuals[:]
UpperCAmelCase_ : Union[str, Any] = scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
UpperCAmelCase_ : Dict = new_scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
UpperCAmelCase_ : List[str] = scheduler.step_plms(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
UpperCAmelCase_ : int = new_scheduler.step_plms(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def UpperCamelCase__ ( self , **lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : str = self.scheduler_classes[0]
UpperCAmelCase_ : Union[str, Any] = self.get_scheduler_config(**lowercase_ )
UpperCAmelCase_ : List[Any] = scheduler_class(**lowercase_ )
UpperCAmelCase_ : Tuple = 10
UpperCAmelCase_ : List[str] = self.dummy_model()
UpperCAmelCase_ : str = self.dummy_sample_deter
scheduler.set_timesteps(lowercase_ )
for i, t in enumerate(scheduler.prk_timesteps ):
UpperCAmelCase_ : Tuple = model(lowercase_ , lowercase_ )
UpperCAmelCase_ : Optional[int] = scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ ).prev_sample
for i, t in enumerate(scheduler.plms_timesteps ):
UpperCAmelCase_ : Any = model(lowercase_ , lowercase_ )
UpperCAmelCase_ : Optional[Any] = scheduler.step_plms(lowercase_ , lowercase_ , lowercase_ ).prev_sample
return sample
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : str = dict(self.forward_default_kwargs )
UpperCAmelCase_ : Optional[Any] = kwargs.pop("num_inference_steps" , lowercase_ )
for scheduler_class in self.scheduler_classes:
UpperCAmelCase_ : Any = self.get_scheduler_config()
UpperCAmelCase_ : Tuple = scheduler_class(**lowercase_ )
UpperCAmelCase_ : str = self.dummy_sample
UpperCAmelCase_ : List[Any] = 0.1 * sample
if num_inference_steps is not None and hasattr(lowercase_ , "set_timesteps" ):
scheduler.set_timesteps(lowercase_ )
elif num_inference_steps is not None and not hasattr(lowercase_ , "set_timesteps" ):
UpperCAmelCase_ : List[Any] = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
UpperCAmelCase_ : List[str] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
UpperCAmelCase_ : List[str] = dummy_past_residuals[:]
UpperCAmelCase_ : str = scheduler.step_prk(lowercase_ , 0 , lowercase_ , **lowercase_ ).prev_sample
UpperCAmelCase_ : Any = scheduler.step_prk(lowercase_ , 1 , lowercase_ , **lowercase_ ).prev_sample
            self.assertEqual(output_0.shape , sample.shape )
            self.assertEqual(output_0.shape , output_1.shape )
UpperCAmelCase_ : Optional[Any] = scheduler.step_plms(lowercase_ , 0 , lowercase_ , **lowercase_ ).prev_sample
UpperCAmelCase_ : Optional[Any] = scheduler.step_plms(lowercase_ , 1 , lowercase_ , **lowercase_ ).prev_sample
            self.assertEqual(output_0.shape , sample.shape )
            self.assertEqual(output_0.shape , output_1.shape )
def UpperCamelCase__ ( self ):
"""simple docstring"""
for timesteps in [100, 1000]:
self.check_over_configs(num_train_timesteps=lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=lowercase_ )
UpperCAmelCase_ : Optional[int] = self.scheduler_classes[0]
UpperCAmelCase_ : int = self.get_scheduler_config(steps_offset=1 )
UpperCAmelCase_ : Optional[Any] = scheduler_class(**lowercase_ )
scheduler.set_timesteps(10 )
assert torch.equal(
scheduler.timesteps , torch.LongTensor(
[901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1] ) , )
def UpperCamelCase__ ( self ):
"""simple docstring"""
for beta_start, beta_end in zip([0.00_01, 0.0_01] , [0.0_02, 0.02] ):
self.check_over_configs(beta_start=lowercase_ , beta_end=lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
for t in [1, 5, 10]:
self.check_over_forward(time_step=lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ):
self.check_over_forward(num_inference_steps=lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
        # earlier versions of set_timesteps() caused an error indexing alphas when the number of inference steps is a power of 3
UpperCAmelCase_ : List[Any] = 27
for scheduler_class in self.scheduler_classes:
UpperCAmelCase_ : List[Any] = self.dummy_sample
UpperCAmelCase_ : Optional[int] = 0.1 * sample
UpperCAmelCase_ : List[str] = self.get_scheduler_config()
UpperCAmelCase_ : List[str] = scheduler_class(**lowercase_ )
scheduler.set_timesteps(lowercase_ )
            # before the power-of-3 fix, this would error on the first step, so we only need to do two steps
for i, t in enumerate(scheduler.prk_timesteps[:2] ):
UpperCAmelCase_ : List[str] = scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ ).prev_sample
def UpperCamelCase__ ( self ):
"""simple docstring"""
with self.assertRaises(lowercase_ ):
UpperCAmelCase_ : List[str] = self.scheduler_classes[0]
UpperCAmelCase_ : str = self.get_scheduler_config()
UpperCAmelCase_ : Tuple = scheduler_class(**lowercase_ )
scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample ).prev_sample
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : List[Any] = self.full_loop()
UpperCAmelCase_ : Any = torch.sum(torch.abs(lowercase_ ) )
UpperCAmelCase_ : Dict = torch.mean(torch.abs(lowercase_ ) )
assert abs(result_sum.item() - 1_98.13_18 ) < 1E-2
assert abs(result_mean.item() - 0.25_80 ) < 1E-3
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = self.full_loop(prediction_type="v_prediction" )
UpperCAmelCase_ : str = torch.sum(torch.abs(lowercase_ ) )
UpperCAmelCase_ : Tuple = torch.mean(torch.abs(lowercase_ ) )
assert abs(result_sum.item() - 67.39_86 ) < 1E-2
assert abs(result_mean.item() - 0.08_78 ) < 1E-3
def UpperCamelCase__ ( self ):
"""simple docstring"""
# We specify different beta, so that the first alpha is 0.99
UpperCAmelCase_ : Union[str, Any] = self.full_loop(set_alpha_to_one=lowercase_ , beta_start=0.01 )
UpperCAmelCase_ : List[Any] = torch.sum(torch.abs(lowercase_ ) )
UpperCAmelCase_ : int = torch.mean(torch.abs(lowercase_ ) )
assert abs(result_sum.item() - 2_30.03_99 ) < 1E-2
assert abs(result_mean.item() - 0.29_95 ) < 1E-3
def UpperCamelCase__ ( self ):
"""simple docstring"""
# We specify different beta, so that the first alpha is 0.99
UpperCAmelCase_ : Tuple = self.full_loop(set_alpha_to_one=lowercase_ , beta_start=0.01 )
UpperCAmelCase_ : int = torch.sum(torch.abs(lowercase_ ) )
UpperCAmelCase_ : Tuple = torch.mean(torch.abs(lowercase_ ) )
assert abs(result_sum.item() - 1_86.94_82 ) < 1E-2
assert abs(result_mean.item() - 0.24_34 ) < 1E-3
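# Standalone sketch of the PRK -> PLMS stepping the tests above exercise, with a
# trivial stand-in for the noise-prediction model. Purely illustrative.
if __name__ == "__main__":
    scheduler = PNDMScheduler(num_train_timesteps=1000, beta_start=0.0001, beta_end=0.02, beta_schedule="linear")
    scheduler.set_timesteps(10)
    sample = torch.ones(1, 3, 8, 8)
    for t in scheduler.prk_timesteps:
        residual = 0.1 * sample  # a real model would predict noise here
        sample = scheduler.step_prk(residual, t, sample).prev_sample
    for t in scheduler.plms_timesteps:
        residual = 0.1 * sample
        sample = scheduler.step_plms(residual, t, sample).prev_sample
    print(sample.abs().mean())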
| 23
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_a = {
'configuration_squeezebert': [
'SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SqueezeBertConfig',
'SqueezeBertOnnxConfig',
],
'tokenization_squeezebert': ['SqueezeBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a = ['SqueezeBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a = [
'SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'SqueezeBertForMaskedLM',
'SqueezeBertForMultipleChoice',
'SqueezeBertForQuestionAnswering',
'SqueezeBertForSequenceClassification',
'SqueezeBertForTokenClassification',
'SqueezeBertModel',
'SqueezeBertModule',
'SqueezeBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
_a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
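# With the _LazyModule indirection above, importing this package stays cheap;
# heavy submodules are only materialized on first attribute access, e.g.:
#   from transformers.models.squeezebert import SqueezeBertConfig  # resolves lazily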
| 368
|
"""simple docstring"""
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
_a = object()
# For specifying empty leaf dict `{}`
_a = object()
def __a ( __lowerCamelCase, __lowerCamelCase ):
UpperCAmelCase_ : Any = tuple((re.compile(x + "$" ) for x in qs) )
for i in range(len(__lowerCamelCase ) - len(__lowerCamelCase ) + 1 ):
UpperCAmelCase_ : List[str] = [x.match(__lowerCamelCase ) for x, y in zip(__lowerCamelCase, ks[i:] )]
if matches and all(__lowerCamelCase ):
return True
return False
def __a ( __lowerCamelCase ):
def replace(__lowerCamelCase, __lowerCamelCase ):
for rule, replacement in rules:
if _match(__lowerCamelCase, __lowerCamelCase ):
return replacement
return val
return replace
def __a ( ):
return [
# embeddings
(("transformer", "wpe", "embedding"), P("mp", __lowerCamelCase )),
(("transformer", "wte", "embedding"), P("mp", __lowerCamelCase )),
        # attention
(("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(__lowerCamelCase, "mp" )),
(("attention", "out_proj", "kernel"), P("mp", __lowerCamelCase )),
(("attention", "out_proj", "bias"), None),
# mlp
(("mlp", "c_fc", "kernel"), P(__lowerCamelCase, "mp" )),
(("mlp", "c_fc", "bias"), P("mp" )),
(("mlp", "c_proj", "kernel"), P("mp", __lowerCamelCase )),
(("mlp", "c_proj", "bias"), None),
# layer norms
((r"ln_\d+", "bias"), None),
((r"\d+", r"ln_\d+", "scale"), None),
(("ln_f", "bias"), None),
(("ln_f", "scale"), None),
]
def __a ( __lowerCamelCase ):
UpperCAmelCase_ : List[str] = _get_partition_rules()
UpperCAmelCase_ : Any = _replacement_rules(__lowerCamelCase )
UpperCAmelCase_ : Any = {k: _unmatched for k in flatten_dict(__lowerCamelCase )}
UpperCAmelCase_ : Dict = {k: replace(__lowerCamelCase, __lowerCamelCase ) for k, v in initd.items()}
assert _unmatched not in result.values(), "Incomplete partition spec."
return freeze(unflatten_dict(__lowerCamelCase ) )
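# Illustrative call (assuming the upstream names _get_partition_rules,
# _replacement_rules and set_partitions for the three helpers above):
#   params = {"transformer": {"wte": {"embedding": ...}}}
#   spec = set_partitions(params)  # frozen tree of PartitionSpec / None leaves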
| 23
| 0
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_a = logging.get_logger(__name__)
def __a ( __lowerCamelCase, __lowerCamelCase=False ) -> int:
UpperCAmelCase_ : Optional[int] = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"""blocks.{i}.norm1.weight""", f"""deit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((f"""blocks.{i}.norm1.bias""", f"""deit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((f"""blocks.{i}.attn.proj.weight""", f"""deit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.attn.proj.bias""", f"""deit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((f"""blocks.{i}.norm2.weight""", f"""deit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((f"""blocks.{i}.norm2.bias""", f"""deit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.weight""", f"""deit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.bias""", f"""deit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.weight""", f"""deit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.bias""", f"""deit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
("cls_token", "deit.embeddings.cls_token"),
("dist_token", "deit.embeddings.distillation_token"),
("patch_embed.proj.weight", "deit.embeddings.patch_embeddings.projection.weight"),
("patch_embed.proj.bias", "deit.embeddings.patch_embeddings.projection.bias"),
("pos_embed", "deit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
("pre_logits.fc.weight", "pooler.dense.weight"),
("pre_logits.fc.bias", "pooler.dense.bias"),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
UpperCAmelCase_ : Dict = [(pair[0], pair[1][4:]) if pair[1].startswith("deit" ) else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
("norm.weight", "deit.layernorm.weight"),
("norm.bias", "deit.layernorm.bias"),
("head.weight", "cls_classifier.weight"),
("head.bias", "cls_classifier.bias"),
("head_dist.weight", "distillation_classifier.weight"),
("head_dist.bias", "distillation_classifier.bias"),
] )
return rename_keys
def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase=False ) -> str:
for i in range(config.num_hidden_layers ):
if base_model:
UpperCAmelCase_ : int = ""
else:
UpperCAmelCase_ : Union[str, Any] = "deit."
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
UpperCAmelCase_ : Tuple = state_dict.pop(f"""blocks.{i}.attn.qkv.weight""" )
UpperCAmelCase_ : Dict = state_dict.pop(f"""blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
UpperCAmelCase_ : Union[str, Any] = in_proj_weight[
: config.hidden_size, :
]
UpperCAmelCase_ : Any = in_proj_bias[: config.hidden_size]
UpperCAmelCase_ : Optional[Any] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
UpperCAmelCase_ : Dict = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
UpperCAmelCase_ : List[Any] = in_proj_weight[
-config.hidden_size :, :
]
UpperCAmelCase_ : str = in_proj_bias[-config.hidden_size :]
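# The timm checkpoint fuses Q, K and V into a single (3 * hidden_size, hidden_size)
# matrix; the three slices above peel it back into separate query/key/value
# projections (e.g. a (2304, 768) fused weight when hidden_size is 768).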
def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ) -> List[Any]:
UpperCAmelCase_ : Tuple = dct.pop(__lowerCamelCase )
UpperCAmelCase_ : Tuple = val
def __a ( ) -> List[Any]:
UpperCAmelCase_ : Union[str, Any] = "http://images.cocodataset.org/val2017/000000039769.jpg"
UpperCAmelCase_ : str = Image.open(requests.get(__lowerCamelCase, stream=__lowerCamelCase ).raw )
return im
@torch.no_grad()
def __a ( __lowerCamelCase, __lowerCamelCase ) -> List[str]:
UpperCAmelCase_ : List[str] = DeiTConfig()
# all deit models have fine-tuned heads
UpperCAmelCase_ : Optional[int] = False
# dataset (fine-tuned on ImageNet 2012), patch_size and image_size
UpperCAmelCase_ : Tuple = 1000
UpperCAmelCase_ : str = "huggingface/label-files"
UpperCAmelCase_ : str = "imagenet-1k-id2label.json"
UpperCAmelCase_ : List[Any] = json.load(open(hf_hub_download(__lowerCamelCase, __lowerCamelCase, repo_type="dataset" ), "r" ) )
UpperCAmelCase_ : List[str] = {int(__lowerCamelCase ): v for k, v in idalabel.items()}
UpperCAmelCase_ : Any = idalabel
UpperCAmelCase_ : int = {v: k for k, v in idalabel.items()}
UpperCAmelCase_ : Any = int(deit_name[-6:-4] )
UpperCAmelCase_ : Dict = int(deit_name[-3:] )
# size of the architecture
if deit_name[9:].startswith("tiny" ):
UpperCAmelCase_ : Any = 192
UpperCAmelCase_ : Union[str, Any] = 768
UpperCAmelCase_ : Union[str, Any] = 12
UpperCAmelCase_ : int = 3
elif deit_name[9:].startswith("small" ):
UpperCAmelCase_ : List[str] = 384
UpperCAmelCase_ : List[str] = 1536
UpperCAmelCase_ : Dict = 12
UpperCAmelCase_ : Any = 6
if deit_name[9:].startswith("base" ):
pass
elif deit_name[4:].startswith("large" ):
UpperCAmelCase_ : int = 1024
UpperCAmelCase_ : List[Any] = 4096
UpperCAmelCase_ : Optional[int] = 24
UpperCAmelCase_ : int = 16
# load original model from timm
UpperCAmelCase_ : Union[str, Any] = timm.create_model(__lowerCamelCase, pretrained=__lowerCamelCase )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
UpperCAmelCase_ : Optional[Any] = timm_model.state_dict()
UpperCAmelCase_ : Tuple = create_rename_keys(__lowerCamelCase, __lowerCamelCase )
for src, dest in rename_keys:
rename_key(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase )
read_in_q_k_v(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase )
# load HuggingFace model
UpperCAmelCase_ : str = DeiTForImageClassificationWithTeacher(__lowerCamelCase ).eval()
model.load_state_dict(__lowerCamelCase )
# Check outputs on an image, prepared by DeiTImageProcessor
UpperCAmelCase_ : Union[str, Any] = int(
(256 / 224) * config.image_size ) # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
UpperCAmelCase_ : Optional[Any] = DeiTImageProcessor(size=__lowerCamelCase, crop_size=config.image_size )
UpperCAmelCase_ : Any = image_processor(images=prepare_img(), return_tensors="pt" )
UpperCAmelCase_ : int = encoding["pixel_values"]
UpperCAmelCase_ : Optional[Any] = model(__lowerCamelCase )
UpperCAmelCase_ : Any = timm_model(__lowerCamelCase )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(__lowerCamelCase, outputs.logits, atol=1E-3 )
Path(__lowerCamelCase ).mkdir(exist_ok=__lowerCamelCase )
print(f"""Saving model {deit_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(__lowerCamelCase )
print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(__lowerCamelCase )
if __name__ == "__main__":
_a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--deit_name',
default='vit_deit_base_distilled_patch16_224',
type=str,
help='Name of the DeiT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
_a = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
| 369
|
"""simple docstring"""
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
_a = logging.getLogger()
@unittest.skip("""Temporarily disable the doc tests.""" )
@require_torch
@require_tf
@slow
class A_ (unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase__ ( self , lowercase_ , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = True , ):
"""simple docstring"""
UpperCAmelCase_ : List[str] = [file for file in os.listdir(lowercase_ ) if os.path.isfile(os.path.join(lowercase_ , lowercase_ ) )]
if identifier is not None:
UpperCAmelCase_ : Dict = [file for file in files if identifier in file]
if n_identifier is not None:
if isinstance(lowercase_ , lowercase_ ):
for n_ in n_identifier:
UpperCAmelCase_ : str = [file for file in files if n_ not in file]
else:
UpperCAmelCase_ : Any = [file for file in files if n_identifier not in file]
UpperCAmelCase_ : Union[str, Any] = ignore_files or []
ignore_files.append("__init__.py" )
UpperCAmelCase_ : Optional[int] = [file for file in files if file not in ignore_files]
for file in files:
# Open all files
print("Testing" , lowercase_ )
if only_modules:
UpperCAmelCase_ : str = file.split("." )[0]
try:
UpperCAmelCase_ : str = getattr(lowercase_ , lowercase_ )
UpperCAmelCase_ : Tuple = doctest.DocTestSuite(lowercase_ )
UpperCAmelCase_ : int = unittest.TextTestRunner().run(lowercase_ )
self.assertIs(len(result.failures ) , 0 )
except AttributeError:
logger.info(F"""{module_identifier} is not a module.""" )
else:
UpperCAmelCase_ : Optional[Any] = doctest.testfile(str(".." / directory / file ) , optionflags=doctest.ELLIPSIS )
self.assertIs(result.failed , 0 )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : int = Path("src/transformers" )
UpperCAmelCase_ : str = "modeling"
UpperCAmelCase_ : Optional[Any] = [
"modeling_ctrl.py",
"modeling_tf_ctrl.py",
]
self.analyze_directory(lowercase_ , identifier=lowercase_ , ignore_files=lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : List[Any] = Path("src/transformers" )
UpperCAmelCase_ : Any = "tokenization"
self.analyze_directory(lowercase_ , identifier=lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = Path("src/transformers" )
UpperCAmelCase_ : List[Any] = "configuration"
self.analyze_directory(lowercase_ , identifier=lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = Path("src/transformers" )
UpperCAmelCase_ : List[Any] = ["configuration", "modeling", "tokenization"]
self.analyze_directory(lowercase_ , n_identifier=lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Dict = Path("docs/source" )
UpperCAmelCase_ : Union[str, Any] = ["favicon.ico"]
self.analyze_directory(lowercase_ , ignore_files=lowercase_ , only_modules=lowercase_ )
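# What analyze_directory effectively runs per module in only_modules mode
# (illustrative; the module below is just an example target):
#   import doctest, transformers
#   suite = doctest.DocTestSuite(transformers.models.bert.configuration_bert)
#   unittest.TextTestRunner().run(suite)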
| 23
| 0
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_a = logging.get_logger(__name__)
_a = {
'uw-madison/mra-base-512-4': 'https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json',
}
class A_ (lowercase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = """mra"""
def __init__( self , lowercase_=5_0265 , lowercase_=768 , lowercase_=12 , lowercase_=12 , lowercase_=3072 , lowercase_="gelu" , lowercase_=0.1 , lowercase_=0.1 , lowercase_=512 , lowercase_=1 , lowercase_=0.02 , lowercase_=1E-5 , lowercase_="absolute" , lowercase_=4 , lowercase_="full" , lowercase_=0 , lowercase_=0 , lowercase_=1 , lowercase_=0 , lowercase_=2 , **lowercase_ , ):
"""simple docstring"""
super().__init__(pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , **lowercase_ )
UpperCAmelCase_ : Optional[int] = vocab_size
UpperCAmelCase_ : Tuple = max_position_embeddings
UpperCAmelCase_ : Dict = hidden_size
UpperCAmelCase_ : Optional[int] = num_hidden_layers
UpperCAmelCase_ : int = num_attention_heads
UpperCAmelCase_ : Tuple = intermediate_size
UpperCAmelCase_ : Union[str, Any] = hidden_act
UpperCAmelCase_ : Tuple = hidden_dropout_prob
UpperCAmelCase_ : List[Any] = attention_probs_dropout_prob
UpperCAmelCase_ : Optional[Any] = initializer_range
UpperCAmelCase_ : List[str] = type_vocab_size
UpperCAmelCase_ : int = layer_norm_eps
UpperCAmelCase_ : str = position_embedding_type
UpperCAmelCase_ : int = block_per_row
UpperCAmelCase_ : Optional[int] = approx_mode
UpperCAmelCase_ : int = initial_prior_first_n_blocks
UpperCAmelCase_ : Optional[int] = initial_prior_diagonal_n_blocks
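# Example (the class keeps the upstream name MraConfig in transformers): every
# __init__ argument has a default, so a bare construction mirrors the
# uw-madison/mra-base-512-4 layout:
#   config = MraConfig()
#   config.num_hidden_layers  # -> 12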
| 370
|
"""simple docstring"""
import warnings
from ...utils import is_sklearn_available, requires_backends
if is_sklearn_available():
from scipy.stats import pearsonr, spearmanr
    from sklearn.metrics import f1_score, matthews_corrcoef
_a = (
'This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate '
'library. You can have a look at this example script for pointers: '
'https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py'
)
def __a ( __lowerCamelCase, __lowerCamelCase ):
warnings.warn(__lowerCamelCase, __lowerCamelCase )
requires_backends(__lowerCamelCase, "sklearn" )
return (preds == labels).mean()
def __a ( __lowerCamelCase, __lowerCamelCase ):
warnings.warn(__lowerCamelCase, __lowerCamelCase )
requires_backends(__lowerCamelCase, "sklearn" )
UpperCAmelCase_ : Optional[Any] = simple_accuracy(__lowerCamelCase, __lowerCamelCase )
    UpperCAmelCase_ : List[Any] = f1_score(y_true=__lowerCamelCase, y_pred=__lowerCamelCase )
    return {
        "acc": acc,
        "f1": f1,
        "acc_and_f1": (acc + f1) / 2,
}
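# Quick sanity check (pure numpy; acc_and_f1 is this function's upstream name):
#   import numpy as np
#   preds, labels = np.array([1, 0, 1, 1]), np.array([1, 0, 0, 1])
#   acc_and_f1(preds, labels)  # {'acc': 0.75, 'f1': 0.8, 'acc_and_f1': 0.775}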
def __a ( __lowerCamelCase, __lowerCamelCase ):
warnings.warn(__lowerCamelCase, __lowerCamelCase )
requires_backends(__lowerCamelCase, "sklearn" )
UpperCAmelCase_ : Any = pearsonr(__lowerCamelCase, __lowerCamelCase )[0]
UpperCAmelCase_ : Optional[Any] = spearmanr(__lowerCamelCase, __lowerCamelCase )[0]
return {
"pearson": pearson_corr,
"spearmanr": spearman_corr,
"corr": (pearson_corr + spearman_corr) / 2,
}
def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
warnings.warn(__lowerCamelCase, __lowerCamelCase )
requires_backends(__lowerCamelCase, "sklearn" )
assert len(__lowerCamelCase ) == len(__lowerCamelCase ), f"""Predictions and labels have mismatched lengths {len(__lowerCamelCase )} and {len(__lowerCamelCase )}"""
if task_name == "cola":
return {"mcc": matthews_corrcoef(__lowerCamelCase, __lowerCamelCase )}
elif task_name == "sst-2":
return {"acc": simple_accuracy(__lowerCamelCase, __lowerCamelCase )}
elif task_name == "mrpc":
        return acc_and_f1(__lowerCamelCase, __lowerCamelCase )
elif task_name == "sts-b":
return pearson_and_spearman(__lowerCamelCase, __lowerCamelCase )
elif task_name == "qqp":
        return acc_and_f1(__lowerCamelCase, __lowerCamelCase )
elif task_name == "mnli":
return {"mnli/acc": simple_accuracy(__lowerCamelCase, __lowerCamelCase )}
elif task_name == "mnli-mm":
return {"mnli-mm/acc": simple_accuracy(__lowerCamelCase, __lowerCamelCase )}
elif task_name == "qnli":
return {"acc": simple_accuracy(__lowerCamelCase, __lowerCamelCase )}
elif task_name == "rte":
return {"acc": simple_accuracy(__lowerCamelCase, __lowerCamelCase )}
elif task_name == "wnli":
return {"acc": simple_accuracy(__lowerCamelCase, __lowerCamelCase )}
elif task_name == "hans":
return {"acc": simple_accuracy(__lowerCamelCase, __lowerCamelCase )}
else:
raise KeyError(__lowerCamelCase )
def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
warnings.warn(__lowerCamelCase, __lowerCamelCase )
requires_backends(__lowerCamelCase, "sklearn" )
if len(__lowerCamelCase ) != len(__lowerCamelCase ):
raise ValueError(f"""Predictions and labels have mismatched lengths {len(__lowerCamelCase )} and {len(__lowerCamelCase )}""" )
if task_name == "xnli":
return {"acc": simple_accuracy(__lowerCamelCase, __lowerCamelCase )}
else:
raise KeyError(__lowerCamelCase )
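# Usage sketch (illustrative, not part of the original module; requires scikit-learn
# and scipy). The metric values below were computed by hand for the toy arrays:
#
#     import numpy as np
#     preds = np.array([1, 0, 1, 1])
#     labels = np.array([1, 0, 0, 1])
#     glue_compute_metrics("mrpc", preds, labels)
#     # -> {"acc": 0.75, "f1": 0.8, "acc_and_f1": 0.775}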
| 23
| 0
|
"""simple docstring"""
import os
from collections import deque
import torch
from torch.utils.data import Dataset
class CNNDMDataset(Dataset):
    """Abstracts the dataset used to train seq2seq models. Files are not read into
    memory up front due to the size of some datasets (like CNN/DailyMail)."""

    def __init__(self, path="", prefix="train"):
        assert os.path.isdir(path)

        self.documents = []
        story_filenames_list = os.listdir(path)
        for story_filename in story_filenames_list:
            if "summary" in story_filename:
                continue
            path_to_story = os.path.join(path, story_filename)
            if not os.path.isfile(path_to_story):
                continue
            self.documents.append(path_to_story)

    def __len__(self):
        """Returns the number of documents."""
        return len(self.documents)

    def __getitem__(self, idx):
        document_path = self.documents[idx]
        document_name = document_path.split("/")[-1]
        with open(document_path, encoding="utf-8") as source:
            raw_story = source.read()
            story_lines, summary_lines = process_story(raw_story)
        return document_name, story_lines, summary_lines


def process_story(raw_story):
    nonempty_lines = list(filter(lambda x: len(x) != 0, [line.strip() for line in raw_story.split("\n")]))

    # for some unknown reason some lines miss a period, add it
    nonempty_lines = [_add_missing_period(line) for line in nonempty_lines]

    # gather article lines
    story_lines = []
    lines = deque(nonempty_lines)
    while True:
        try:
            element = lines.popleft()
            if element.startswith("@highlight"):
                break
            story_lines.append(element)
        except IndexError:
            # if "@highlight" is absent from the file we pop
            # all elements until there is None, raising an exception.
            return story_lines, []

    # gather summary lines
    summary_lines = list(filter(lambda t: not t.startswith("@highlight"), lines))

    return story_lines, summary_lines


def _add_missing_period(line):
    END_TOKENS = [".", "!", "?", "...", "'", "`", '"', "\u2019", "\u201d", ")"]
    if line.startswith("@highlight"):
        return line
    if line[-1] in END_TOKENS:
        return line
    return line + "."


def fit_to_block_size(sequence, block_size, pad_token_id):
    """Truncate or pad a token sequence to exactly `block_size` tokens."""
    if len(sequence) > block_size:
        return sequence[:block_size]
    else:
        sequence.extend([pad_token_id] * (block_size - len(sequence)))
        return sequence


def build_mask(sequence, pad_token_id):
    """Builds an attention mask that is 0 on padding tokens and 1 elsewhere."""
    mask = torch.ones_like(sequence)
    idx_pad_tokens = sequence == pad_token_id
    mask[idx_pad_tokens] = 0
    return mask


def encode_for_summarization(story_lines, summary_lines, tokenizer):
    story_lines_token_ids = [tokenizer.encode(line) for line in story_lines]
    story_token_ids = [token for sentence in story_lines_token_ids for token in sentence]
    summary_lines_token_ids = [tokenizer.encode(line) for line in summary_lines]
    summary_token_ids = [token for sentence in summary_lines_token_ids for token in sentence]

    return story_token_ids, summary_token_ids


def compute_token_type_ids(batch, separator_token_id):
    """Alternating 0/1 segment ids, flipping at every separator token."""
    batch_embeddings = []
    for sequence in batch:
        sentence_num = -1
        embeddings = []
        for s in sequence:
            if s == separator_token_id:
                sentence_num += 1
            embeddings.append(sentence_num % 2)
        batch_embeddings.append(embeddings)
    return torch.tensor(batch_embeddings)
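# Illustrative story format these helpers expect (the toy string below is made up):
# article sentences, then "@highlight"-prefixed summary lines.
#
#     story, summary = process_story("First sentence.\nSecond sentence\n@highlight\nThe summary.")
#     # story   -> ["First sentence.", "Second sentence."]   (missing period added)
#     # summary -> ["The summary."]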
| 371
|
"""simple docstring"""
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json",
    }
}

PRETRAINED_MODEL_MAX_INPUT_SIZES = {"mgp-str": 27}


class MgpstrTokenizer(PreTrainedTokenizer):
    """Character-level tokenizer for MGP-STR scene-text recognition."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_MODEL_MAX_INPUT_SIZES

    def __init__(self, vocab_file, unk_token="[GO]", bos_token="[GO]", eos_token="[s]", pad_token="[GO]", **kwargs):
        super().__init__(
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.vocab = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.vocab.items()}

    @property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        char_tokens = []
        for s in text:
            char_tokens.extend(s)
        return char_tokens

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.vocab, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        return (vocab_file,)
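# Usage sketch (illustrative; assumes a local vocab.json mapping characters to ids —
# the real vocabulary ships with the alibaba-damo/mgp-str-base checkpoint):
#
#     tokenizer = MgpstrTokenizer(vocab_file="vocab.json")
#     tokenizer._tokenize("abc")           # -> ["a", "b", "c"]
#     tokenizer._convert_token_to_id("a")  # -> the id for "a", or the [GO] id if unknown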
| 23
| 0
|
"""simple docstring"""
from __future__ import annotations
from PIL import Image
# Define glider example
_a = [
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
# Define blinker example
BLINKER = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
def new_generation(cells: list[list[int]]) -> list[list[int]]:
    next_generation = []
    for i in range(len(cells)):
        next_generation_row = []
        for j in range(len(cells[i])):
            # Get the number of live neighbours
            neighbour_count = 0
            if i > 0 and j > 0:
                neighbour_count += cells[i - 1][j - 1]
            if i > 0:
                neighbour_count += cells[i - 1][j]
            if i > 0 and j < len(cells[i]) - 1:
                neighbour_count += cells[i - 1][j + 1]
            if j > 0:
                neighbour_count += cells[i][j - 1]
            if j < len(cells[i]) - 1:
                neighbour_count += cells[i][j + 1]
            if i < len(cells) - 1 and j > 0:
                neighbour_count += cells[i + 1][j - 1]
            if i < len(cells) - 1:
                neighbour_count += cells[i + 1][j]
            if i < len(cells) - 1 and j < len(cells[i]) - 1:
                neighbour_count += cells[i + 1][j + 1]

            # Rules of the game of life (excerpt from Wikipedia):
            # 1. Any live cell with two or three live neighbours survives.
            # 2. Any dead cell with three live neighbours becomes a live cell.
            # 3. All other live cells die in the next generation.
            #    Similarly, all other dead cells stay dead.
            alive = cells[i][j] == 1
            if (alive and 2 <= neighbour_count <= 3) or (not alive and neighbour_count == 3):
                next_generation_row.append(1)
            else:
                next_generation_row.append(0)

        next_generation.append(next_generation_row)
    return next_generation
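# Worked example: one step of the vertical blinker flips it horizontal —
# new_generation(BLINKER) == [[0, 0, 0], [1, 1, 1], [0, 0, 0]].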
def generate_images(cells: list[list[int]], frames: int) -> list[Image.Image]:
    images = []
    for _ in range(frames):
        # Create output image
        img = Image.new("RGB", (len(cells[0]), len(cells)))
        pixels = img.load()

        # Save cells to image
        for x in range(len(cells)):
            for y in range(len(cells[0])):
                colour = 255 - cells[y][x] * 255
                pixels[x, y] = (colour, colour, colour)

        # Save image
        images.append(img)
        cells = new_generation(cells)
    return images
if __name__ == "__main__":
    images = generate_images(GLIDER, 16)
images[0].save('out.gif', save_all=True, append_images=images[1:])
| 350
|
"""simple docstring"""
import string
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
_a = {
'E': 12.70,
'T': 9.06,
'A': 8.17,
'O': 7.51,
'I': 6.97,
'N': 6.75,
'S': 6.33,
'H': 6.09,
'R': 5.99,
'D': 4.25,
'L': 4.03,
'C': 2.78,
'U': 2.76,
'M': 2.41,
'W': 2.36,
'F': 2.23,
'G': 2.02,
'Y': 1.97,
'P': 1.93,
'B': 1.29,
'V': 0.98,
'K': 0.77,
'J': 0.15,
'X': 0.15,
'Q': 0.10,
'Z': 0.07,
}
ETAOIN = "ETAOINSHRDLCUMWFGYPBVKJXQZ"
LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"


def get_letter_count(message: str) -> dict[str, int]:
    letter_count = {letter: 0 for letter in string.ascii_uppercase}
    for letter in message.upper():
        if letter in LETTERS:
            letter_count[letter] += 1
    return letter_count


def get_item_at_index_zero(x: tuple) -> str:
    return x[0]


def get_frequency_order(message: str) -> str:
    letter_to_freq = get_letter_count(message)
    freq_to_letter: dict[int, list[str]] = {freq: [] for letter, freq in letter_to_freq.items()}
    for letter in LETTERS:
        freq_to_letter[letter_to_freq[letter]].append(letter)

    freq_to_letter_str: dict[int, str] = {}
    for freq in freq_to_letter:
        freq_to_letter[freq].sort(key=ETAOIN.find, reverse=True)
        freq_to_letter_str[freq] = "".join(freq_to_letter[freq])

    freq_pairs = list(freq_to_letter_str.items())
    freq_pairs.sort(key=get_item_at_index_zero, reverse=True)

    sorted_freq_to_letter: list[str] = [freq_pair[1] for freq_pair in freq_pairs]
    return "".join(sorted_freq_to_letter)


def english_freq_match_score(message: str) -> int:
    freq_order = get_frequency_order(message)
    match_score = 0
    for common_letter in ETAOIN[:6]:
        if common_letter in freq_order[:6]:
            match_score += 1
    for uncommon_letter in ETAOIN[-6:]:
        if uncommon_letter in freq_order[-6:]:
            match_score += 1
    return match_score
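# Example (illustrative): rank Caesar-shift candidates of a ciphertext by how
# English-like their letter frequencies look; caesar_decrypt is a hypothetical helper.
#
#     best_shift = max(range(26), key=lambda s: english_freq_match_score(caesar_decrypt(ciphertext, s)))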
if __name__ == "__main__":
import doctest
doctest.testmod()
| 23
| 0
|
"""simple docstring"""
import sys
from collections import defaultdict
class Heap:
    def __init__(self) -> None:
        self.node_position = []

    def get_position(self, vertex):
        return self.node_position[vertex]

    def set_position(self, vertex, pos):
        self.node_position[vertex] = pos

    def top_to_bottom(self, heap, start, size, positions):
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
            if heap[smallest_child] < heap[start]:
                temp, temp1 = heap[smallest_child], positions[smallest_child]
                heap[smallest_child], positions[smallest_child] = (
                    heap[start],
                    positions[start],
                )
                heap[start], positions[start] = temp, temp1

                temp = self.get_position(positions[smallest_child])
                self.set_position(
                    positions[smallest_child], self.get_position(positions[start])
                )
                self.set_position(positions[start], temp)

                self.top_to_bottom(heap, smallest_child, size, positions)

    # Update function if the value of any node in the min-heap decreases
    def bottom_to_top(self, val, index, heap, position):
        temp = position[index]

        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)

            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index)
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp, 0)

    def heapify(self, heap, positions):
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)

    def delete_minimum(self, heap, positions):
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap, 0, len(heap), positions)
        return temp


def prisms_algorithm(adjacency_list):
    heap = Heap()

    visited = [0] * len(adjacency_list)
    nbr_tv = [-1] * len(adjacency_list)  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex

    positions = []
    for vertex in range(len(adjacency_list)):
        distance_tv.append(sys.maxsize)
        positions.append(vertex)
        heap.node_position.append(vertex)

    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions)

    for _ in range(1, len(adjacency_list)):
        vertex = heap.delete_minimum(distance_tv, positions)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor)]
                ):
                    distance_tv[heap.get_position(neighbor)] = distance
                    heap.bottom_to_top(
                        distance, heap.get_position(neighbor), distance_tv, positions
                    )
                    nbr_tv[neighbor] = vertex
    return tree_edges


if __name__ == "__main__":  # pragma: no cover
    # < --------- Prims Algorithm --------- >
    edges_number = int(input("Enter number of edges: ").strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
        adjacency_list[edge[0]].append([edge[1], edge[2]])
        adjacency_list[edge[1]].append([edge[0], edge[2]])
    print(prisms_algorithm(adjacency_list))
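    # Worked example (sketch): for the triangle graph with edges
    #   "0 1 1", "1 2 2", "0 2 3"  (u v weight)
    # the printed MST edge list should be [(0, 1), (1, 2)], skipping the heavier 0-2 edge.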
| 351
|
"""simple docstring"""
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()


def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f


class DeeBertTests(TestCasePlus):
    def setup(self) -> None:
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)

    def run_and_check(self, args):
        n_gpu = get_gpu_count()

        if n_gpu > 1:
            pass
            # XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
            # script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
            # distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
            # cmd = [sys.executable] + distributed_args + args
            # execute_subprocess_async(cmd, env=self.get_env())
            # XXX: test the results - need to save them first into .json file
        else:
            args.insert(0, "run_glue_deebert.py")
            with patch.object(sys, "argv", args):
                result = run_glue_deebert.main()
                for value in result.values():
                    self.assertGreaterEqual(value, 0.666)

    @slow
    @require_torch_non_multi_gpu
    def test_glue_deebert_train(self):
        train_args = """
            --model_type roberta --model_name_or_path roberta-base --task_name MRPC
            --do_train --do_eval --do_lower_case
            --data_dir ./tests/fixtures/tests_samples/MRPC/ --max_seq_length 128
            --per_gpu_eval_batch_size=1 --per_gpu_train_batch_size=8
            --learning_rate 2e-4 --num_train_epochs 3 --overwrite_output_dir --seed 42
            --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
            --plot_data_dir ./examples/deebert/results/
            --save_steps 0 --overwrite_cache --eval_after_first_stage
            """.split()
        self.run_and_check(train_args)

        eval_args = """
            --model_type roberta
            --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
            --task_name MRPC --do_eval --do_lower_case
            --data_dir ./tests/fixtures/tests_samples/MRPC/
            --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
            --plot_data_dir ./examples/deebert/results/
            --max_seq_length 128 --eval_each_highway --eval_highway
            --overwrite_cache --per_gpu_eval_batch_size=1
            """.split()
        self.run_and_check(eval_args)

        entropy_eval_args = """
            --model_type roberta
            --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
            --task_name MRPC --do_eval --do_lower_case
            --data_dir ./tests/fixtures/tests_samples/MRPC/
            --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
            --plot_data_dir ./examples/deebert/results/
            --max_seq_length 128 --early_exit_entropy 0.1 --eval_highway
            --overwrite_cache --per_gpu_eval_batch_size=1
            """.split()
        self.run_and_check(entropy_eval_args)
| 23
| 0
|
"""simple docstring"""
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, f1_score  # noqa: E402 # isort:skip


logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)

transformers_logging.set_verbosity_info()


def infer_model_type(model_name_or_path):
    if "token" in model_name_or_path:
        return "rag_token"
    if "sequence" in model_name_or_path:
        return "rag_sequence"
    if "bart" in model_name_or_path:
        return "bart"
    return None


def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
    return max(metric_fn(prediction, gt) for gt in ground_truths)


def get_scores(args, preds_path, gold_data_path):
    hypos = [line.strip() for line in open(preds_path, "r").readlines()]
    answers = []

    if args.gold_data_mode == "qa":
        data = pd.read_csv(gold_data_path, sep="\t", header=None)
        for answer_list in data[1]:
            ground_truths = ast.literal_eval(answer_list)
            answers.append(ground_truths)
    else:
        references = [line.strip() for line in open(gold_data_path, "r").readlines()]
        answers = [[reference] for reference in references]

    f1 = em = total = 0
    for prediction, ground_truths in zip(hypos, answers):
        total += 1
        em += metric_max_over_ground_truths(exact_match_score, prediction, ground_truths)
        f1 += metric_max_over_ground_truths(f1_score, prediction, ground_truths)

    em = 100.0 * em / total
    f1 = 100.0 * f1 / total

    logger.info(f"F1: {f1:.2f}")
    logger.info(f"EM: {em:.2f}")


def get_precision_at_k(args, preds_path, gold_data_path):
    k = args.k
    hypos = [line.strip() for line in open(preds_path, "r").readlines()]
    references = [line.strip() for line in open(gold_data_path, "r").readlines()]

    em = total = 0
    for hypo, reference in zip(hypos, references):
        hypo_provenance = set(hypo.split("\t")[:k])
        ref_provenance = set(reference.split("\t"))
        total += 1
        em += len(hypo_provenance & ref_provenance) / k

    em = 100.0 * em / total
    logger.info(f"Precision@{k}: {em: .2f}")


def evaluate_batch_retrieval(args, rag_model, questions):
    def strip_title(title):
        if title.startswith('"'):
            title = title[1:]
        if title.endswith('"'):
            title = title[:-1]
        return title

    retriever_input_ids = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
        questions, return_tensors="pt", padding=True, truncation=True
    )["input_ids"].to(args.device)

    question_enc_outputs = rag_model.rag.question_encoder(retriever_input_ids)
    question_enc_pool_output = question_enc_outputs[0]

    result = rag_model.retriever(
        retriever_input_ids,
        question_enc_pool_output.cpu().detach().to(torch.float32).numpy(),
        prefix=rag_model.rag.generator.config.prefix,
        n_docs=rag_model.config.n_docs,
        return_tensors="pt",
    )
    all_docs = rag_model.retriever.index.get_doc_dicts(result.doc_ids)
    provenance_strings = []
    for docs in all_docs:
        provenance = [strip_title(title) for title in docs["title"]]
        provenance_strings.append("\t".join(provenance))
    return provenance_strings


def evaluate_batch_e2e(args, rag_model, questions):
    with torch.no_grad():
        inputs_dict = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
            questions, return_tensors="pt", padding=True, truncation=True
        )

        input_ids = inputs_dict.input_ids.to(args.device)
        attention_mask = inputs_dict.attention_mask.to(args.device)
        outputs = rag_model.generate(  # rag_model overwrites generate
            input_ids,
            attention_mask=attention_mask,
            num_beams=args.num_beams,
            min_length=args.min_length,
            max_length=args.max_length,
            early_stopping=False,
            num_return_sequences=1,
            bad_words_ids=[[0, 0]],  # BART likes to repeat BOS tokens; don't allow more than one
        )
        answers = rag_model.retriever.generator_tokenizer.batch_decode(outputs, skip_special_tokens=True)

        if args.print_predictions:
            for q, a in zip(questions, answers):
                logger.info("Q: {} - A: {}".format(q, a))

        return answers


def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_type", choices=["rag_sequence", "rag_token", "bart"], type=str,
        help=(
            "RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the"
            " model_name_or_path"
        ),
    )
    parser.add_argument(
        "--index_name", default=None, choices=["exact", "compressed", "legacy"], type=str,
        help="RAG model retriever type",
    )
    parser.add_argument("--index_path", default=None, type=str, help="Path to the retrieval index")
    parser.add_argument("--n_docs", default=5, type=int, help="Number of retrieved docs")
    parser.add_argument(
        "--model_name_or_path", default=None, type=str, required=True,
        help="Path to pretrained checkpoints or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--eval_mode", choices=["e2e", "retrieval"], default="e2e", type=str,
        help=(
            "Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates"
            " precision@k."
        ),
    )
    parser.add_argument("--k", default=1, type=int, help="k for the precision@k calculation")
    parser.add_argument(
        "--evaluation_set", default=None, type=str, required=True,
        help="Path to a file containing evaluation samples",
    )
    parser.add_argument(
        "--gold_data_path", default=None, type=str, required=True,
        help="Path to a tab-separated file with gold samples",
    )
    parser.add_argument(
        "--gold_data_mode", default="qa", type=str, choices=["qa", "ans"],
        help=(
            "Format of the gold data file. "
            "qa - a single line in the following format: question [tab] answer_list. "
            "ans - a single line of the gold file contains the expected answer string."
        ),
    )
    parser.add_argument(
        "--predictions_path", type=str, default="predictions.txt",
        help="Name of the predictions file, to be stored in the checkpoints directory",
    )
    parser.add_argument(
        "--eval_all_checkpoints", action="store_true",
        help="Evaluate all checkpoints starting with the same prefix as model_name and ending with a step number",
    )
    parser.add_argument("--eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation.")
    parser.add_argument(
        "--recalculate", help="Recalculate predictions even if the prediction file exists", action="store_true",
    )
    parser.add_argument("--num_beams", default=4, type=int, help="Number of beams to be used when generating answers")
    parser.add_argument("--min_length", default=1, type=int, help="Min length of the generated answers")
    parser.add_argument("--max_length", default=50, type=int, help="Max length of the generated answers")
    parser.add_argument(
        "--print_predictions", action="store_true", help="If True, prints predictions while evaluating.",
    )
    parser.add_argument(
        "--print_docs", action="store_true", help="If True, prints docs retrieved while generating.",
    )
    args = parser.parse_args()
    args.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    return args


def main(args):
    model_kwargs = {}
    if args.model_type is None:
        args.model_type = infer_model_type(args.model_name_or_path)
        assert args.model_type is not None
    if args.model_type.startswith("rag"):
        model_class = RagTokenForGeneration if args.model_type == "rag_token" else RagSequenceForGeneration
        model_kwargs["n_docs"] = args.n_docs
        if args.index_name is not None:
            model_kwargs["index_name"] = args.index_name
        if args.index_path is not None:
            model_kwargs["index_path"] = args.index_path
    else:
        model_class = BartForConditionalGeneration

    checkpoints = (
        [f.path for f in os.scandir(args.model_name_or_path) if f.is_dir()]
        if args.eval_all_checkpoints
        else [args.model_name_or_path]
    )

    logger.info("Evaluate the following checkpoints: %s", checkpoints)

    score_fn = get_scores if args.eval_mode == "e2e" else get_precision_at_k
    evaluate_batch_fn = evaluate_batch_e2e if args.eval_mode == "e2e" else evaluate_batch_retrieval

    for checkpoint in checkpoints:
        if os.path.exists(args.predictions_path) and (not args.recalculate):
            logger.info("Calculating metrics based on an existing predictions file: {}".format(args.predictions_path))
            score_fn(args, args.predictions_path, args.gold_data_path)
            continue

        logger.info("***** Running evaluation for {} *****".format(checkpoint))
        logger.info("  Batch size = %d", args.eval_batch_size)
        logger.info("  Predictions will be stored under {}".format(args.predictions_path))

        if args.model_type.startswith("rag"):
            retriever = RagRetriever.from_pretrained(checkpoint, **model_kwargs)
            model = model_class.from_pretrained(checkpoint, retriever=retriever, **model_kwargs)
            model.retriever.init_retrieval()
        else:
            model = model_class.from_pretrained(checkpoint, **model_kwargs)
        model.to(args.device)

        with open(args.evaluation_set, "r") as eval_file, open(args.predictions_path, "w") as preds_file:
            questions = []
            for line in tqdm(eval_file):
                questions.append(line.strip())
                if len(questions) == args.eval_batch_size:
                    answers = evaluate_batch_fn(args, model, questions)
                    preds_file.write("\n".join(answers) + "\n")
                    preds_file.flush()
                    questions = []
            if len(questions) > 0:
                answers = evaluate_batch_fn(args, model, questions)
                preds_file.write("\n".join(answers))
                preds_file.flush()

            score_fn(args, args.predictions_path, args.gold_data_path)


if __name__ == "__main__":
    args = get_args()
    main(args)
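# Example invocation (sketch; file paths are placeholders, the checkpoint name is a
# real hub id):
#
#   python eval_rag.py --model_name_or_path facebook/rag-token-nq --model_type rag_token \
#       --evaluation_set questions.txt --gold_data_path gold.tsv --eval_mode e2e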
| 352
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {"configuration_unispeech": ["UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP", "UniSpeechConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_unispeech"] = [
'UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST',
'UniSpeechForCTC',
'UniSpeechForPreTraining',
'UniSpeechForSequenceClassification',
'UniSpeechModel',
'UniSpeechPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 23
| 0
|
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


def make_batched(videos) -> List[List[ImageInput]]:
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos
    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]
    elif is_valid_image(videos):
        return [[videos]]
    raise ValueError(f"Could not make batched video from {videos}")
class VivitImageProcessor(BaseImageProcessor):
    r"""Constructs a ViViT image processor."""

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        offset: bool = True,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.offset = offset
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(self, image, size, resample=PILImageResampling.BILINEAR, data_format=None, **kwargs) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size["shortest_edge"], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image, size, data_format=None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size must have 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image, scale, offset: bool = True, data_format=None, **kwargs) -> np.ndarray:
        image = image.astype(np.float32)
        if offset:
            image = image - (scale / 2)
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image, mean, std, data_format=None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def _preprocess_image(
        self,
        image: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        offset: bool = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
    ) -> np.ndarray:
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        if offset and not do_rescale:
            raise ValueError("For offset, do_rescale must also be set to True.")

        # All transformations expect numpy arrays.
        image = to_numpy_array(image)

        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)

        if do_center_crop:
            image = self.center_crop(image, size=crop_size)

        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor, offset=offset)

        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)

        image = to_channel_dimension_format(image, data_format)
        return image

    def preprocess(
        self,
        videos: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        offset: bool = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        offset = offset if offset is not None else self.offset
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        if not valid_images(videos):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        videos = make_batched(videos)

        videos = [
            [
                self._preprocess_image(
                    image=img,
                    do_resize=do_resize,
                    size=size,
                    resample=resample,
                    do_center_crop=do_center_crop,
                    crop_size=crop_size,
                    do_rescale=do_rescale,
                    rescale_factor=rescale_factor,
                    offset=offset,
                    do_normalize=do_normalize,
                    image_mean=image_mean,
                    image_std=image_std,
                    data_format=data_format,
                )
                for img in video
            ]
            for video in videos
        ]

        data = {"pixel_values": videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 353
|
"""simple docstring"""
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = """
    Examples:
        ```py
        >>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline
        >>> import torch

        >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior")
        >>> pipe_prior.to("cuda")
        >>> prompt = "red cat, 4k photo"
        >>> out = pipe_prior(prompt)
        >>> image_emb = out.image_embeds
        >>> zero_image_emb = out.negative_image_embeds
        >>> pipe = KandinskyV22Pipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder")
        >>> pipe.to("cuda")
        >>> image = pipe(
        ...     image_embeds=image_emb,
        ...     negative_image_embeds=zero_image_emb,
        ...     height=768,
        ...     width=768,
        ...     num_inference_steps=50,
        ... ).images
        >>> image[0].save("cat.png")
        ```
"""


def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
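# Worked example: with the default scale_factor of 8 the block size is 8**2 = 64, so a
# 512x512 request maps to latents of 512 // 64 * 8 = 64 per side, while a non-multiple
# such as 520 rounds up to (520 // 64 + 1) * 8 = 72.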
class KandinskyV22Pipeline(DiffusionPipeline):
    """Pipeline for text-to-image generation using Kandinsky 2.2 (unCLIP decoder stage)."""

    def __init__(
        self,
        unet: UNet2DConditionModel,
        scheduler: DDPMScheduler,
        movq: VQModel,
    ):
        super().__init__()

        self.register_modules(
            unet=unet,
            scheduler=scheduler,
            movq=movq,
        )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents

    def enable_sequential_cpu_offload(self, gpu_id=0):
        r"""Offloads all models to CPU using accelerate, significantly reducing memory usage."""
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    def enable_model_cpu_offload(self, gpu_id=0):
        r"""Offloads all models to CPU, keeping only one forward-pass copy on GPU at a time."""
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device

    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 100,
        guidance_scale: float = 4.0,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        device = self._execution_device

        do_classifier_free_guidance = guidance_scale > 1.0

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        batch_size = image_embeds.shape[0] * num_images_per_prompt
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)

            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(
                dtype=self.unet.dtype, device=device
            )

        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps

        num_channels_latents = self.unet.config.in_channels

        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)

        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width),
            image_embeds.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )

        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {"image_embeds": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=None,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred,
                t,
                latents,
                generator=generator,
            )[0]

        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
| 23
| 0
|
"""simple docstring"""
def __a ( __lowerCamelCase ):
if a < 0:
raise ValueError("Input value must be a positive integer" )
elif isinstance(__lowerCamelCase, __lowerCamelCase ):
raise TypeError("Input value must be a 'int' type" )
return bin(__lowerCamelCase ).count("1" )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 354
|
"""simple docstring"""
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'facebook/detr-resnet-50': 'https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json',
# See all DETR models at https://huggingface.co/models?filter=detr
}
class DetrConfig(PretrainedConfig):
    r"""Configuration class to store the configuration of a DETR model."""

    model_type = "detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=100,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            dilation, backbone, use_pretrained_backbone = None, None, None

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    @classmethod
    def from_backbone_config(cls, backbone_config: PretrainedConfig, **kwargs):
        """Instantiate a DetrConfig from a pre-trained backbone model configuration."""
        return cls(backbone_config=backbone_config, **kwargs)

    def to_dict(self) -> Dict[str, any]:
        """Serializes this instance to a Python dictionary, nesting the backbone config."""
        output = copy.deepcopy(self.__dict__)
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


class DetrOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
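# Usage sketch (hedged; the attribute aliases come from attribute_map above, and the
# DetrOnnxConfig construction assumes the generic OnnxConfig(config) signature):
#
#     config = DetrConfig()
#     config.num_attention_heads   # -> 8 (alias of encoder_attention_heads)
#     config.hidden_size           # -> 256 (alias of d_model)
#     onnx_config = DetrOnnxConfig(config)
#     list(onnx_config.inputs)     # -> ["pixel_values", "pixel_mask"]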
| 23
| 0
|
"""simple docstring"""
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order):
    expected_row_ids_and_row_dicts = []
    for part_id in partition_order:
        partition = df.where(f"SPARK_PARTITION_ID() = {part_id}").collect()
        for row_idx, row in enumerate(partition):
            expected_row_ids_and_row_dicts.append((f"{part_id}_{row_idx}", row.asDict()))
    return expected_row_ids_and_row_dicts


@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(100).repartition(1)
    spark_builder = Spark(df)
    # The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
    # that each partition can hold 2 rows.
    spark_builder._repartition_df_if_needed(max_shard_size=16)
    # Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
    assert spark_builder.df.rdd.getNumPartitions() == 50


@require_not_windows
@require_dill_gt_0_3_2
def test_generate_iterable_examples():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(10).repartition(2)
    partition_order = [1, 0]
    generate_fn = _generate_iterable_examples(df, partition_order)  # Reverse the partitions.
    expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order)

    for i, (row_id, row_dict) in enumerate(generate_fn()):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict


@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(10).repartition(1)
    it = SparkExamplesIterable(df)
    assert it.n_shards == 1
    for i, (row_id, row_dict) in enumerate(it):
        assert row_id == f"0_{i}"
        assert row_dict == {"id": i}


@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shuffle():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(30).repartition(3)
    # Mock the generator so that shuffle reverses the partition indices.
    with patch("numpy.random.Generator") as generator_mock:
        generator_mock.shuffle.side_effect = lambda x: x.reverse()
        expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [2, 1, 0])

        shuffled_it = SparkExamplesIterable(df).shuffle_data_sources(generator_mock)
        assert shuffled_it.n_shards == 3
        for i, (row_id, row_dict) in enumerate(shuffled_it):
            expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
            assert row_id == expected_row_id
            assert row_dict == expected_row_dict


@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shard():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(20).repartition(4)
    # Partitions 0 and 2
    shard_it_1 = SparkExamplesIterable(df).shard_data_sources(worker_id=0, num_workers=2)
    assert shard_it_1.n_shards == 2
    expected_row_ids_and_row_dicts_1 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [0, 2])
    for i, (row_id, row_dict) in enumerate(shard_it_1):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_1[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
    # Partitions 1 and 3
    shard_it_2 = SparkExamplesIterable(df).shard_data_sources(worker_id=1, num_workers=2)
    assert shard_it_2.n_shards == 2
    expected_row_ids_and_row_dicts_2 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [1, 3])
    for i, (row_id, row_dict) in enumerate(shard_it_2):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_2[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict


@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed_max_num_df_rows():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(100).repartition(1)
    spark_builder = Spark(df)
    # Choose a small max_shard_size for maximum partitioning.
    spark_builder._repartition_df_if_needed(max_shard_size=1)
    # The new number of partitions should not be greater than the number of rows.
    assert spark_builder.df.rdd.getNumPartitions() == 100
| 355
|
"""simple docstring"""
_a = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(100_000)]
def __a ( __lowerCamelCase ):
UpperCAmelCase_ : Optional[int] = 0
while number:
# Increased Speed Slightly by checking every 5 digits together.
sum_of_digits_squared += DIGITS_SQUARED[number % 10_0000]
number //= 10_0000
return sum_of_digits_squared
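# Worked example: next_number(44) = 4**2 + 4**2 = 32 and next_number(85) = 8**2 + 5**2 = 89,
# so 44 -> 32 -> 13 -> 10 -> 1 ends the "1" chain, while 85 joins the cycle containing 89.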
# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.
# Changed dictionary to an array to quicken the solution
CHAINS = [None] * 10_000_000  # index n-1 -> True (chain ends at 1) / False (chain ends at 89)
CHAINS[0] = True  # the chain starting at 1
CHAINS[57] = False  # the chain starting at 58, which ends in 89


def chain(number: int) -> bool:
    """Return True if the square-digit chain of `number` ends at 1, False if it ends at 89."""
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore

    number_chain = chain(next_number(number))
    CHAINS[number - 1] = number_chain

    # 10*number, 100*number, ... share the same chain end, so cache those too.
    while number < 10_000_000:
        CHAINS[number - 1] = number_chain
        number *= 10

    return number_chain


def solution(number: int = 10_000_000) -> int:
    """Count starting values below `number` whose chain arrives at 89."""
    for i in range(1, number):
        if CHAINS[i] is None:
            chain(i + 1)

    return CHAINS[:number].count(False)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f"""{solution() = }""")
| 23
| 0
|
"""simple docstring"""
from .integrations import (
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_wandb_available,
run_hp_search_optuna,
run_hp_search_ray,
run_hp_search_sigopt,
run_hp_search_wandb,
)
from .trainer_utils import (
HPSearchBackend,
default_hp_space_optuna,
default_hp_space_ray,
default_hp_space_sigopt,
default_hp_space_wandb,
)
from .utils import logging
logger = logging.get_logger(__name__)


class HyperParamSearchBackendBase:
    name: str
    pip_package: str = None

    @staticmethod
    def is_available() -> bool:
        raise NotImplementedError

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        raise NotImplementedError

    def default_hp_space(self, trial):
        raise NotImplementedError

    def ensure_available(self):
        if not self.is_available():
            raise RuntimeError(
                f"You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}."
            )

    @classmethod
    def pip_install(cls):
        return f"`pip install {cls.pip_package or cls.name}`"


class OptunaBackend(HyperParamSearchBackendBase):
    name = "optuna"

    @staticmethod
    def is_available() -> bool:
        return is_optuna_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_optuna(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_optuna(trial)


class RayTuneBackend(HyperParamSearchBackendBase):
    name = "ray"
    pip_package = "'ray[tune]'"

    @staticmethod
    def is_available() -> bool:
        return is_ray_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_ray(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_ray(trial)


class SigOptBackend(HyperParamSearchBackendBase):
    name = "sigopt"

    @staticmethod
    def is_available() -> bool:
        return is_sigopt_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_sigopt(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_sigopt(trial)


class WandbBackend(HyperParamSearchBackendBase):
    name = "wandb"

    @staticmethod
    def is_available() -> bool:
        return is_wandb_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_wandb(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_wandb(trial)


ALL_HYPERPARAMETER_SEARCH_BACKENDS = {
    HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}


def default_hp_search_backend() -> str:
    available_backends = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()]
    if len(available_backends) > 0:
        name = available_backends[0].name
        if len(available_backends) > 1:
            logger.info(
                f"{len(available_backends)} hyperparameter search backends available. Using {name} as the default."
            )
        return name
    raise RuntimeError(
        "No hyperparameter search backend available.\n"
        + "\n".join(
            f" - To install {backend.name} run {backend.pip_install()}"
            for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values()
        )
    )
| 356
|
"""simple docstring"""
def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
    # Return True if the sink t is reachable from the source in the residual graph; parent[] records the path found.
UpperCAmelCase_ : List[Any] = [False] * len(__lowerCamelCase )
UpperCAmelCase_ : Any = []
queue.append(__lowerCamelCase )
UpperCAmelCase_ : Tuple = True
while queue:
UpperCAmelCase_ : str = queue.pop(0 )
for ind in range(len(graph[u] ) ):
if visited[ind] is False and graph[u][ind] > 0:
queue.append(__lowerCamelCase )
UpperCAmelCase_ : Any = True
UpperCAmelCase_ : Union[str, Any] = u
return visited[t]
def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
    # parent[] is filled by the BFS above to recover each augmenting path
UpperCAmelCase_ : List[str] = [-1] * (len(__lowerCamelCase ))
UpperCAmelCase_ : Any = 0
while bfs(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
UpperCAmelCase_ : int = float("Inf" )
UpperCAmelCase_ : Tuple = sink
while s != source:
# Find the minimum value in select path
UpperCAmelCase_ : Tuple = min(__lowerCamelCase, graph[parent[s]][s] )
UpperCAmelCase_ : Dict = parent[s]
max_flow += path_flow
UpperCAmelCase_ : Optional[Any] = sink
while v != source:
UpperCAmelCase_ : List[str] = parent[v]
graph[u][v] -= path_flow
graph[v][u] += path_flow
UpperCAmelCase_ : Optional[int] = parent[v]
return max_flow
_a = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
_a , _a = 0, 5
print(ford_fulkerson(graph, source, sink))
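# Sanity check: the maximum flow on the classic CLRS network above is 23.
# A tiny hand-checkable instance: one middle node with capacities 3 then 2,
# so the bottleneck (and hence the max flow) is 2.
assert ford_fulkerson([[0, 3, 0], [0, 0, 2], [0, 0, 0]], 0, 2) == 2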
| 23
| 0
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
_a = {
'google/tapas-base-finetuned-sqa': (
'https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json'
),
'google/tapas-base-finetuned-wtq': (
'https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json'
),
'google/tapas-base-finetuned-wikisql-supervised': (
'https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json'
),
'google/tapas-base-finetuned-tabfact': (
'https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json'
),
}
class A_ (lowercase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = """tapas"""
def __init__( self , lowercase_=3_0522 , lowercase_=768 , lowercase_=12 , lowercase_=12 , lowercase_=3072 , lowercase_="gelu" , lowercase_=0.1 , lowercase_=0.1 , lowercase_=1024 , lowercase_=[3, 256, 256, 2, 256, 256, 10] , lowercase_=0.02 , lowercase_=1E-1_2 , lowercase_=0 , lowercase_=10.0 , lowercase_=0 , lowercase_=1.0 , lowercase_=None , lowercase_=1.0 , lowercase_=False , lowercase_=None , lowercase_=1.0 , lowercase_=1.0 , lowercase_=False , lowercase_=False , lowercase_="ratio" , lowercase_=None , lowercase_=None , lowercase_=64 , lowercase_=32 , lowercase_=False , lowercase_=True , lowercase_=False , lowercase_=False , lowercase_=True , lowercase_=False , lowercase_=None , lowercase_=None , **lowercase_ , ):
"""simple docstring"""
super().__init__(pad_token_id=lowercase_ , **lowercase_ )
# BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
UpperCAmelCase_ : Dict = vocab_size
UpperCAmelCase_ : Union[str, Any] = hidden_size
UpperCAmelCase_ : Tuple = num_hidden_layers
UpperCAmelCase_ : Tuple = num_attention_heads
UpperCAmelCase_ : Union[str, Any] = hidden_act
UpperCAmelCase_ : str = intermediate_size
UpperCAmelCase_ : Tuple = hidden_dropout_prob
UpperCAmelCase_ : int = attention_probs_dropout_prob
UpperCAmelCase_ : Tuple = max_position_embeddings
UpperCAmelCase_ : str = type_vocab_sizes
UpperCAmelCase_ : List[Any] = initializer_range
UpperCAmelCase_ : List[Any] = layer_norm_eps
# Fine-tuning task hyperparameters
UpperCAmelCase_ : Any = positive_label_weight
UpperCAmelCase_ : List[Any] = num_aggregation_labels
UpperCAmelCase_ : Any = aggregation_loss_weight
UpperCAmelCase_ : Optional[Any] = use_answer_as_supervision
UpperCAmelCase_ : Optional[int] = answer_loss_importance
UpperCAmelCase_ : str = use_normalized_answer_loss
UpperCAmelCase_ : Dict = huber_loss_delta
UpperCAmelCase_ : List[str] = temperature
UpperCAmelCase_ : Union[str, Any] = aggregation_temperature
UpperCAmelCase_ : List[Any] = use_gumbel_for_cells
UpperCAmelCase_ : Tuple = use_gumbel_for_aggregation
UpperCAmelCase_ : str = average_approximation_function
UpperCAmelCase_ : Dict = cell_selection_preference
UpperCAmelCase_ : Optional[Any] = answer_loss_cutoff
UpperCAmelCase_ : Optional[int] = max_num_rows
UpperCAmelCase_ : Optional[Any] = max_num_columns
UpperCAmelCase_ : Union[str, Any] = average_logits_per_cell
UpperCAmelCase_ : int = select_one_column
UpperCAmelCase_ : List[str] = allow_empty_column_selection
UpperCAmelCase_ : Tuple = init_cell_selection_weights_to_zero
UpperCAmelCase_ : int = reset_position_index_per_cell
UpperCAmelCase_ : Optional[int] = disable_per_token_loss
# Aggregation hyperparameters
UpperCAmelCase_ : int = aggregation_labels
UpperCAmelCase_ : List[Any] = no_aggregation_label_index
if isinstance(self.aggregation_labels , lowercase_ ):
UpperCAmelCase_ : str = {int(lowercase_ ): v for k, v in aggregation_labels.items()}
| 357
|
"""simple docstring"""
import datasets
_a = '\\n@InProceedings{conneau2018xnli,\n author = "Conneau, Alexis\n and Rinott, Ruty\n and Lample, Guillaume\n and Williams, Adina\n and Bowman, Samuel R.\n and Schwenk, Holger\n and Stoyanov, Veselin",\n title = "XNLI: Evaluating Cross-lingual Sentence Representations",\n booktitle = "Proceedings of the 2018 Conference on Empirical Methods\n in Natural Language Processing",\n year = "2018",\n publisher = "Association for Computational Linguistics",\n location = "Brussels, Belgium",\n}\n'
_a = '\\nXNLI is a subset of a few thousand examples from MNLI which has been translated\ninto 14 different languages (some low-ish resource). As with MNLI, the goal is\nto predict textual entailment (does sentence A imply/contradict/neither sentence\nB) and is a classification task (given two sentences, predict one of three\nlabels).\n'
_a = '\nComputes XNLI score which is just simple accuracy.\nArgs:\n predictions: Predicted labels.\n references: Ground truth labels.\nReturns:\n \'accuracy\': accuracy\nExamples:\n\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> xnli_metric = datasets.load_metric("xnli")\n >>> results = xnli_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n'
def __a ( __lowerCamelCase, __lowerCamelCase ):
return (preds == labels).mean()
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class A_ (datasets.Metric ):
'''simple docstring'''
def UpperCamelCase__ ( self ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("int64" if self.config_name != "sts-b" else "float32" ),
"references": datasets.Value("int64" if self.config_name != "sts-b" else "float32" ),
} ) , codebase_urls=[] , reference_urls=[] , format="numpy" , )
def UpperCamelCase__ ( self , lowercase_ , lowercase_ ):
"""simple docstring"""
return {"accuracy": simple_accuracy(lowercase_ , lowercase_ )}
| 23
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_a = {'configuration_mra': ['MRA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MraConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a = [
'MRA_PRETRAINED_MODEL_ARCHIVE_LIST',
'MraForMaskedLM',
'MraForMultipleChoice',
'MraForQuestionAnswering',
'MraForSequenceClassification',
'MraForTokenClassification',
'MraLayer',
'MraModel',
'MraPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
_a = _LazyModule(__name__, globals()['__file__'], _import_structure)
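# The structure above is the standard lazy-import pattern: _LazyModule resolves
# attributes from _import_structure only on first access (so torch is not imported
# just to read the config), while the TYPE_CHECKING branch gives static type
# checkers the real symbols.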
| 358
|
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
_a = logging.get_logger(__name__)
class A_ (lowercase__ ):
'''simple docstring'''
def __init__( self , lowercase_ , lowercase_ , lowercase_ , **lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = feature_size
UpperCAmelCase_ : Any = sampling_rate
UpperCAmelCase_ : Any = padding_value
UpperCAmelCase_ : str = kwargs.pop("padding_side" , "right" )
UpperCAmelCase_ : List[str] = kwargs.pop("return_attention_mask" , lowercase_ )
super().__init__(**lowercase_ )
def UpperCamelCase__ ( self , lowercase_ , lowercase_ = True , lowercase_ = None , lowercase_ = False , lowercase_ = None , lowercase_ = None , lowercase_ = None , ):
"""simple docstring"""
        # If we have a list of dicts, convert it to a dict of lists.
        # This allows the method to be used as a collate_fn in a PyTorch DataLoader.
if isinstance(lowercase_ , (list, tuple) ) and isinstance(processed_features[0] , (dict, BatchFeature) ):
UpperCAmelCase_ : Dict = {
key: [example[key] for example in processed_features] for key in processed_features[0].keys()
}
        # The model's main input name, usually `input_values`, has to be passed for padding
if self.model_input_names[0] not in processed_features:
raise ValueError(
"You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`"
F""" to this method that includes {self.model_input_names[0]}, but you provided"""
F""" {list(processed_features.keys() )}""" )
UpperCAmelCase_ : Tuple = processed_features[self.model_input_names[0]]
UpperCAmelCase_ : List[str] = (
return_attention_mask if return_attention_mask is not None else self.return_attention_mask
)
if len(lowercase_ ) == 0:
if return_attention_mask:
UpperCAmelCase_ : Union[str, Any] = []
return processed_features
# If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
# and rebuild them afterwards if no return_tensors is specified
# Note that we lose the specific device the tensor may be on for PyTorch
UpperCAmelCase_ : List[str] = required_input[0]
if isinstance(lowercase_ , (list, tuple) ):
            # first_element might be an empty list/tuple in some edge cases, so we grab the first non-empty element.
UpperCAmelCase_ : Any = 0
while len(required_input[index] ) == 0:
index += 1
if index < len(lowercase_ ):
UpperCAmelCase_ : Optional[Any] = required_input[index][0]
if return_tensors is None:
if is_tf_tensor(lowercase_ ):
UpperCAmelCase_ : Dict = "tf"
elif is_torch_tensor(lowercase_ ):
UpperCAmelCase_ : Any = "pt"
elif isinstance(lowercase_ , (int, float, list, tuple, np.ndarray) ):
UpperCAmelCase_ : str = "np"
else:
raise ValueError(
F"""type of {first_element} unknown: {type(lowercase_ )}. """
"Should be one of a python, numpy, pytorch or tensorflow object." )
for key, value in processed_features.items():
if isinstance(value[0] , (int, float) ):
UpperCAmelCase_ : Optional[int] = to_numpy(lowercase_ )
else:
UpperCAmelCase_ : List[str] = [to_numpy(lowercase_ ) for v in value]
# Convert padding_strategy in PaddingStrategy
UpperCAmelCase_ : Dict = self._get_padding_strategies(padding=lowercase_ , max_length=lowercase_ )
UpperCAmelCase_ : str = processed_features[self.model_input_names[0]]
UpperCAmelCase_ : int = len(lowercase_ )
if not all(len(lowercase_ ) == batch_size for v in processed_features.values() ):
raise ValueError("Some items in the output dictionary have a different batch size than others." )
UpperCAmelCase_ : int = []
for i in range(lowercase_ ):
UpperCAmelCase_ : str = {k: v[i] for k, v in processed_features.items()}
# truncation
UpperCAmelCase_ : List[str] = self._truncate(
lowercase_ , max_length=lowercase_ , pad_to_multiple_of=lowercase_ , truncation=lowercase_ , )
truncated_inputs.append(lowercase_ )
if padding_strategy == PaddingStrategy.LONGEST:
# make sure that `max_length` cannot be longer than the longest truncated length
UpperCAmelCase_ : str = max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs )
UpperCAmelCase_ : Dict = PaddingStrategy.MAX_LENGTH
UpperCAmelCase_ : List[str] = {}
for i in range(lowercase_ ):
# padding
UpperCAmelCase_ : int = self._pad(
truncated_inputs[i] , max_length=lowercase_ , padding_strategy=lowercase_ , pad_to_multiple_of=lowercase_ , return_attention_mask=lowercase_ , )
for key, value in outputs.items():
if key not in batch_outputs:
UpperCAmelCase_ : Any = []
            if value.dtype is np.dtype(np.float64 ):
                UpperCAmelCase_ : List[Any] = value.astype(np.float32 )
batch_outputs[key].append(lowercase_ )
return BatchFeature(lowercase_ , tensor_type=lowercase_ )
def UpperCamelCase__ ( self , lowercase_ , lowercase_ = None , lowercase_ = PaddingStrategy.DO_NOT_PAD , lowercase_ = None , lowercase_ = None , ):
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = processed_features[self.model_input_names[0]]
if padding_strategy == PaddingStrategy.LONGEST:
UpperCAmelCase_ : Tuple = len(lowercase_ )
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
UpperCAmelCase_ : Tuple = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
UpperCAmelCase_ : Dict = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(lowercase_ ) < max_length
if return_attention_mask and "attention_mask" not in processed_features:
            UpperCAmelCase_ : Optional[int] = np.ones(len(lowercase_ ) , dtype=np.int32 )
if needs_to_be_padded:
UpperCAmelCase_ : Dict = max_length - len(lowercase_ )
if self.padding_side == "right":
if return_attention_mask:
UpperCAmelCase_ : List[Any] = np.pad(
processed_features["attention_mask"] , (0, difference) )
UpperCAmelCase_ : Dict = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
UpperCAmelCase_ : Optional[Any] = np.pad(
lowercase_ , lowercase_ , "constant" , constant_values=self.padding_value )
elif self.padding_side == "left":
if return_attention_mask:
UpperCAmelCase_ : Optional[Any] = np.pad(
processed_features["attention_mask"] , (difference, 0) )
UpperCAmelCase_ : Dict = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
UpperCAmelCase_ : str = np.pad(
lowercase_ , lowercase_ , "constant" , constant_values=self.padding_value )
else:
raise ValueError("Invalid padding strategy:" + str(self.padding_side ) )
return processed_features
def UpperCamelCase__ ( self , lowercase_ , lowercase_ = None , lowercase_ = None , lowercase_ = None , ):
"""simple docstring"""
if not truncation:
return processed_features
elif truncation and max_length is None:
raise ValueError("When setting ``truncation=True``, make sure that ``max_length`` is defined." )
UpperCAmelCase_ : Optional[int] = processed_features[self.model_input_names[0]]
# find `max_length` that fits `pad_to_multiple_of`
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
UpperCAmelCase_ : Union[str, Any] = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
UpperCAmelCase_ : Optional[Any] = len(lowercase_ ) > max_length
if needs_to_be_truncated:
UpperCAmelCase_ : int = processed_features[self.model_input_names[0]][:max_length]
if "attention_mask" in processed_features:
UpperCAmelCase_ : Dict = processed_features["attention_mask"][:max_length]
return processed_features
def UpperCamelCase__ ( self , lowercase_=False , lowercase_=None ):
"""simple docstring"""
# Get padding strategy
if padding is not False:
if padding is True:
UpperCAmelCase_ : Dict = PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch
elif not isinstance(lowercase_ , lowercase_ ):
UpperCAmelCase_ : Optional[Any] = PaddingStrategy(lowercase_ )
elif isinstance(lowercase_ , lowercase_ ):
UpperCAmelCase_ : int = padding
else:
UpperCAmelCase_ : str = PaddingStrategy.DO_NOT_PAD
# Set max length if needed
if max_length is None:
if padding_strategy == PaddingStrategy.MAX_LENGTH:
raise ValueError(
F"""When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined""" )
# Test if we have a padding value
if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
raise ValueError(
"Asking to pad but the feature_extractor does not have a padding value. Please select a value to use"
" as `padding_value`. For example: `feature_extractor.padding_value = 0.0`." )
return padding_strategy
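# A minimal, self-contained illustration of the right-padding branch of _pad for
# feature_size == 1 (this helper is ours, written against numpy only):
def right_pad(values, max_length, padding_value=0.0):
    difference = max_length - len(values)
    # attention mask: 1 for real positions, 0 for padding
    attention_mask = np.pad(np.ones(len(values), dtype=np.int32), (0, difference))
    padded = np.pad(np.asarray(values), (0, difference), "constant", constant_values=padding_value)
    return padded, attention_mask
# right_pad([0.1, 0.2, 0.3], max_length=5)[1].tolist() == [1, 1, 1, 0, 0]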
| 23
| 0
|
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
_a = logging.get_logger(__name__)
_a = {
'facebook/detr-resnet-50': 'https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json',
# See all DETR models at https://huggingface.co/models?filter=detr
}
class A_ (lowercase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict = """detr"""
SCREAMING_SNAKE_CASE__ : str = ["""past_key_values"""]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
}
def __init__( self , lowercase_=True , lowercase_=None , lowercase_=3 , lowercase_=100 , lowercase_=6 , lowercase_=2048 , lowercase_=8 , lowercase_=6 , lowercase_=2048 , lowercase_=8 , lowercase_=0.0 , lowercase_=0.0 , lowercase_=True , lowercase_="relu" , lowercase_=256 , lowercase_=0.1 , lowercase_=0.0 , lowercase_=0.0 , lowercase_=0.02 , lowercase_=1.0 , lowercase_=False , lowercase_="sine" , lowercase_="resnet50" , lowercase_=True , lowercase_=False , lowercase_=1 , lowercase_=5 , lowercase_=2 , lowercase_=1 , lowercase_=1 , lowercase_=5 , lowercase_=2 , lowercase_=0.1 , **lowercase_ , ):
"""simple docstring"""
if backbone_config is not None and use_timm_backbone:
raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`." )
if not use_timm_backbone:
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
UpperCAmelCase_ : Union[str, Any] = CONFIG_MAPPING["resnet"](out_features=["stage4"] )
elif isinstance(lowercase_ , lowercase_ ):
UpperCAmelCase_ : int = backbone_config.get("model_type" )
UpperCAmelCase_ : int = CONFIG_MAPPING[backbone_model_type]
UpperCAmelCase_ : Any = config_class.from_dict(lowercase_ )
# set timm attributes to None
            UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = None, None, None  # dilation, backbone, use_pretrained_backbone
UpperCAmelCase_ : int = use_timm_backbone
UpperCAmelCase_ : int = backbone_config
UpperCAmelCase_ : List[Any] = num_channels
UpperCAmelCase_ : int = num_queries
UpperCAmelCase_ : Union[str, Any] = d_model
UpperCAmelCase_ : str = encoder_ffn_dim
UpperCAmelCase_ : Tuple = encoder_layers
UpperCAmelCase_ : List[Any] = encoder_attention_heads
UpperCAmelCase_ : Union[str, Any] = decoder_ffn_dim
UpperCAmelCase_ : Optional[Any] = decoder_layers
UpperCAmelCase_ : Union[str, Any] = decoder_attention_heads
UpperCAmelCase_ : Optional[int] = dropout
UpperCAmelCase_ : List[str] = attention_dropout
UpperCAmelCase_ : Any = activation_dropout
UpperCAmelCase_ : str = activation_function
UpperCAmelCase_ : Tuple = init_std
UpperCAmelCase_ : Optional[Any] = init_xavier_std
UpperCAmelCase_ : Optional[Any] = encoder_layerdrop
UpperCAmelCase_ : Optional[int] = decoder_layerdrop
UpperCAmelCase_ : Tuple = encoder_layers
UpperCAmelCase_ : int = auxiliary_loss
UpperCAmelCase_ : Optional[Any] = position_embedding_type
UpperCAmelCase_ : Tuple = backbone
UpperCAmelCase_ : Optional[int] = use_pretrained_backbone
UpperCAmelCase_ : Dict = dilation
# Hungarian matcher
UpperCAmelCase_ : Union[str, Any] = class_cost
UpperCAmelCase_ : Any = bbox_cost
UpperCAmelCase_ : int = giou_cost
# Loss coefficients
UpperCAmelCase_ : str = mask_loss_coefficient
UpperCAmelCase_ : Any = dice_loss_coefficient
UpperCAmelCase_ : Optional[Any] = bbox_loss_coefficient
UpperCAmelCase_ : List[str] = giou_loss_coefficient
UpperCAmelCase_ : List[Any] = eos_coefficient
super().__init__(is_encoder_decoder=lowercase_ , **lowercase_ )
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return self.encoder_attention_heads
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return self.d_model
@classmethod
def UpperCamelCase__ ( cls , lowercase_ , **lowercase_ ):
"""simple docstring"""
return cls(backbone_config=lowercase_ , **lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : List[Any] = copy.deepcopy(self.__dict__ )
if output["backbone_config"] is not None:
UpperCAmelCase_ : Union[str, Any] = self.backbone_config.to_dict()
UpperCAmelCase_ : str = self.__class__.model_type
return output
class A_ (lowercase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = version.parse("""1.11""" )
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
("pixel_mask", {0: "batch"}),
] )
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return 1E-5
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return 12
| 359
|
"""simple docstring"""
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class A_ (unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : str = torch.nn.Linear(10 , 10 )
UpperCAmelCase_ : List[str] = torch.optim.SGD(model.parameters() , 0.1 )
UpperCAmelCase_ : Optional[Any] = Accelerator()
UpperCAmelCase_ : Tuple = accelerator.prepare(lowercase_ )
try:
pickle.loads(pickle.dumps(lowercase_ ) )
except Exception as e:
self.fail(F"""Accelerated optimizer pickling failed with {e}""" )
AcceleratorState._reset_state()
| 23
| 0
|
"""simple docstring"""
import random
def __a ( __lowerCamelCase, __lowerCamelCase ):
    UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = [], [], []  # less, equal, greater
for element in data:
if element < pivot:
less.append(__lowerCamelCase )
elif element > pivot:
greater.append(__lowerCamelCase )
else:
equal.append(__lowerCamelCase )
return less, equal, greater
def __a ( __lowerCamelCase, __lowerCamelCase ):
    # To find the median, call with index = len(items) // 2:
    # quick_select returns the value that would sit at that index if items were sorted.
# invalid input
if index >= len(__lowerCamelCase ) or index < 0:
return None
UpperCAmelCase_ : int = items[random.randint(0, len(__lowerCamelCase ) - 1 )]
UpperCAmelCase_ : List[str] = 0
UpperCAmelCase_ : Dict = _partition(__lowerCamelCase, __lowerCamelCase )
UpperCAmelCase_ : int = len(__lowerCamelCase )
UpperCAmelCase_ : Union[str, Any] = len(__lowerCamelCase )
# index is the pivot
if m <= index < m + count:
return pivot
# must be in smaller
elif m > index:
return quick_select(__lowerCamelCase, __lowerCamelCase )
# must be in larger
else:
return quick_select(__lowerCamelCase, index - (m + count) )
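# Usage sketch: the median of an odd-length list is quick_select at index len // 2,
# in expected linear time versus O(n log n) for sorting first.
assert quick_select([2, 4, 5, 7, 899, 54, 32], 3) == 7  # sorted: [2, 4, 5, 7, 32, 54, 899]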
| 360
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_a = logging.get_logger(__name__)
_a = {'ctrl': 'https://huggingface.co/ctrl/resolve/main/config.json'}
class A_ (lowercase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict = """ctrl"""
SCREAMING_SNAKE_CASE__ : Optional[int] = ["""past_key_values"""]
SCREAMING_SNAKE_CASE__ : List[str] = {
"""max_position_embeddings""": """n_positions""",
"""hidden_size""": """n_embd""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self , lowercase_=24_6534 , lowercase_=256 , lowercase_=1280 , lowercase_=8192 , lowercase_=48 , lowercase_=16 , lowercase_=0.1 , lowercase_=0.1 , lowercase_=1E-6 , lowercase_=0.02 , lowercase_=True , **lowercase_ , ):
"""simple docstring"""
UpperCAmelCase_ : Tuple = vocab_size
UpperCAmelCase_ : Union[str, Any] = n_positions
UpperCAmelCase_ : List[str] = n_embd
UpperCAmelCase_ : Dict = n_layer
UpperCAmelCase_ : Optional[int] = n_head
UpperCAmelCase_ : List[str] = dff
UpperCAmelCase_ : Tuple = resid_pdrop
UpperCAmelCase_ : Optional[Any] = embd_pdrop
UpperCAmelCase_ : str = layer_norm_epsilon
UpperCAmelCase_ : List[str] = initializer_range
UpperCAmelCase_ : List[str] = use_cache
super().__init__(**lowercase_ )
| 23
| 0
|
"""simple docstring"""
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class A_ (lowercase__ ):
'''simple docstring'''
def __init__( self ):
"""simple docstring"""
UpperCAmelCase_ : Dict = []
def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ , **lowercase_ ):
"""simple docstring"""
self.events.append("on_init_end" )
def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ , **lowercase_ ):
"""simple docstring"""
self.events.append("on_train_begin" )
def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ , **lowercase_ ):
"""simple docstring"""
self.events.append("on_train_end" )
def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ , **lowercase_ ):
"""simple docstring"""
self.events.append("on_epoch_begin" )
def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ , **lowercase_ ):
"""simple docstring"""
self.events.append("on_epoch_end" )
def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ , **lowercase_ ):
"""simple docstring"""
self.events.append("on_step_begin" )
def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ , **lowercase_ ):
"""simple docstring"""
self.events.append("on_step_end" )
def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ , **lowercase_ ):
"""simple docstring"""
self.events.append("on_evaluate" )
def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ , **lowercase_ ):
"""simple docstring"""
self.events.append("on_predict" )
def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ , **lowercase_ ):
"""simple docstring"""
self.events.append("on_save" )
def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ , **lowercase_ ):
"""simple docstring"""
self.events.append("on_log" )
def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ , **lowercase_ ):
"""simple docstring"""
self.events.append("on_prediction_step" )
@require_torch
class A_ (unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Tuple = tempfile.mkdtemp()
def UpperCamelCase__ ( self ):
"""simple docstring"""
shutil.rmtree(self.output_dir )
def UpperCamelCase__ ( self , lowercase_=0 , lowercase_=0 , lowercase_=64 , lowercase_=64 , lowercase_=None , lowercase_=False , **lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : Dict = RegressionDataset(length=lowercase_ )
UpperCAmelCase_ : List[str] = RegressionDataset(length=lowercase_ )
UpperCAmelCase_ : Optional[int] = RegressionModelConfig(a=lowercase_ , b=lowercase_ )
UpperCAmelCase_ : Optional[Any] = RegressionPreTrainedModel(lowercase_ )
UpperCAmelCase_ : Dict = TrainingArguments(self.output_dir , disable_tqdm=lowercase_ , report_to=[] , **lowercase_ )
return Trainer(
lowercase_ , lowercase_ , train_dataset=lowercase_ , eval_dataset=lowercase_ , callbacks=lowercase_ , )
def UpperCamelCase__ ( self , lowercase_ , lowercase_ ):
"""simple docstring"""
self.assertEqual(len(lowercase_ ) , len(lowercase_ ) )
# Order doesn't matter
UpperCAmelCase_ : Optional[int] = sorted(lowercase_ , key=lambda lowercase_ : cb.__name__ if isinstance(lowercase_ , lowercase_ ) else cb.__class__.__name__ )
UpperCAmelCase_ : Union[str, Any] = sorted(lowercase_ , key=lambda lowercase_ : cb.__name__ if isinstance(lowercase_ , lowercase_ ) else cb.__class__.__name__ )
for cba, cba in zip(lowercase_ , lowercase_ ):
if isinstance(lowercase_ , lowercase_ ) and isinstance(lowercase_ , lowercase_ ):
self.assertEqual(lowercase_ , lowercase_ )
elif isinstance(lowercase_ , lowercase_ ) and not isinstance(lowercase_ , lowercase_ ):
self.assertEqual(lowercase_ , cba.__class__ )
elif not isinstance(lowercase_ , lowercase_ ) and isinstance(lowercase_ , lowercase_ ):
self.assertEqual(cba.__class__ , lowercase_ )
else:
self.assertEqual(lowercase_ , lowercase_ )
def UpperCamelCase__ ( self , lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : str = ["on_init_end", "on_train_begin"]
UpperCAmelCase_ : Union[str, Any] = 0
UpperCAmelCase_ : str = len(trainer.get_eval_dataloader() )
UpperCAmelCase_ : str = ["on_prediction_step"] * len(trainer.get_eval_dataloader() ) + ["on_log", "on_evaluate"]
for _ in range(trainer.state.num_train_epochs ):
expected_events.append("on_epoch_begin" )
for _ in range(lowercase_ ):
step += 1
expected_events += ["on_step_begin", "on_step_end"]
if step % trainer.args.logging_steps == 0:
expected_events.append("on_log" )
if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
expected_events += evaluation_events.copy()
if step % trainer.args.save_steps == 0:
expected_events.append("on_save" )
expected_events.append("on_epoch_end" )
if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
expected_events += evaluation_events.copy()
expected_events += ["on_log", "on_train_end"]
return expected_events
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : str = self.get_trainer()
UpperCAmelCase_ : Union[str, Any] = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks , lowercase_ )
# Callbacks passed at init are added to the default callbacks
UpperCAmelCase_ : int = self.get_trainer(callbacks=[MyTestTrainerCallback] )
expected_callbacks.append(lowercase_ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , lowercase_ )
        # TrainingArguments.disable_tqdm controls whether ProgressCallback or PrinterCallback is used
UpperCAmelCase_ : Any = self.get_trainer(disable_tqdm=lowercase_ )
UpperCAmelCase_ : Tuple = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks , lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Any = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
UpperCAmelCase_ : List[Any] = self.get_trainer()
# We can add, pop, or remove by class name
trainer.remove_callback(lowercase_ )
expected_callbacks.remove(lowercase_ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , lowercase_ )
UpperCAmelCase_ : Dict = self.get_trainer()
UpperCAmelCase_ : List[Any] = trainer.pop_callback(lowercase_ )
self.assertEqual(cb.__class__ , lowercase_ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , lowercase_ )
trainer.add_callback(lowercase_ )
expected_callbacks.insert(0 , lowercase_ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , lowercase_ )
# We can also add, pop, or remove by instance
UpperCAmelCase_ : List[Any] = self.get_trainer()
UpperCAmelCase_ : Optional[Any] = trainer.callback_handler.callbacks[0]
trainer.remove_callback(lowercase_ )
expected_callbacks.remove(lowercase_ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , lowercase_ )
UpperCAmelCase_ : Dict = self.get_trainer()
UpperCAmelCase_ : str = trainer.callback_handler.callbacks[0]
UpperCAmelCase_ : List[str] = trainer.pop_callback(lowercase_ )
self.assertEqual(lowercase_ , lowercase_ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , lowercase_ )
trainer.add_callback(lowercase_ )
expected_callbacks.insert(0 , lowercase_ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
import warnings
# XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
warnings.simplefilter(action="ignore" , category=lowercase_ )
UpperCAmelCase_ : Optional[Any] = self.get_trainer(callbacks=[MyTestTrainerCallback] )
trainer.train()
UpperCAmelCase_ : str = trainer.callback_handler.callbacks[-2].events
self.assertEqual(lowercase_ , self.get_expected_events(lowercase_ ) )
# Independent log/save/eval
UpperCAmelCase_ : str = self.get_trainer(callbacks=[MyTestTrainerCallback] , logging_steps=5 )
trainer.train()
UpperCAmelCase_ : Union[str, Any] = trainer.callback_handler.callbacks[-2].events
self.assertEqual(lowercase_ , self.get_expected_events(lowercase_ ) )
UpperCAmelCase_ : Any = self.get_trainer(callbacks=[MyTestTrainerCallback] , save_steps=5 )
trainer.train()
UpperCAmelCase_ : Union[str, Any] = trainer.callback_handler.callbacks[-2].events
self.assertEqual(lowercase_ , self.get_expected_events(lowercase_ ) )
UpperCAmelCase_ : Optional[int] = self.get_trainer(callbacks=[MyTestTrainerCallback] , eval_steps=5 , evaluation_strategy="steps" )
trainer.train()
UpperCAmelCase_ : List[Any] = trainer.callback_handler.callbacks[-2].events
self.assertEqual(lowercase_ , self.get_expected_events(lowercase_ ) )
UpperCAmelCase_ : List[Any] = self.get_trainer(callbacks=[MyTestTrainerCallback] , evaluation_strategy="epoch" )
trainer.train()
UpperCAmelCase_ : Tuple = trainer.callback_handler.callbacks[-2].events
self.assertEqual(lowercase_ , self.get_expected_events(lowercase_ ) )
# A bit of everything
UpperCAmelCase_ : int = self.get_trainer(
callbacks=[MyTestTrainerCallback] , logging_steps=3 , save_steps=10 , eval_steps=5 , evaluation_strategy="steps" , )
trainer.train()
UpperCAmelCase_ : Dict = trainer.callback_handler.callbacks[-2].events
self.assertEqual(lowercase_ , self.get_expected_events(lowercase_ ) )
# warning should be emitted for duplicated callbacks
with patch("transformers.trainer_callback.logger.warning" ) as warn_mock:
UpperCAmelCase_ : int = self.get_trainer(
callbacks=[MyTestTrainerCallback, MyTestTrainerCallback] , )
assert str(lowercase_ ) in warn_mock.call_args[0][0]
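# A minimal real-world counterpart of MyTestTrainerCallback: the hook signature
# follows transformers.TrainerCallback (imported above), printing one line per epoch.
class EpochLogger(TrainerCallback):
    def on_epoch_end(self, args, state, control, **kwargs):
        print(F"""finished epoch {state.epoch}""")
# It would be passed at init exactly as the tests do: get_trainer(callbacks=[EpochLogger])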
| 361
|
"""simple docstring"""
def __a ( __lowerCamelCase ):
assert isinstance(__lowerCamelCase, __lowerCamelCase ), f"""The input value of [n={number}] is not an integer"""
if number == 1:
return 2
elif number < 1:
UpperCAmelCase_ : str = f"""The input value of [n={number}] has to be > 0"""
raise ValueError(__lowerCamelCase )
else:
UpperCAmelCase_ : List[str] = sylvester(number - 1 )
UpperCAmelCase_ : List[str] = num - 1
UpperCAmelCase_ : List[str] = num
return lower * upper + 1
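# The recurrence above is a(1) = 2 and a(n) = a(n-1)**2 - a(n-1) + 1, computed as
# (a(n-1) - 1) * a(n-1) + 1, so the sequence starts 2, 3, 7, 43, 1807, ...
assert [sylvester(n) for n in range(1, 5)] == [2, 3, 7, 43]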
if __name__ == "__main__":
print(f"""The 8th number in Sylvester's sequence: {sylvester(8)}""")
| 23
| 0
|
import math
import sys
def __a ( __lowerCamelCase ):
UpperCAmelCase_ : Tuple = ""
try:
with open(__lowerCamelCase, "rb" ) as binary_file:
UpperCAmelCase_ : Union[str, Any] = binary_file.read()
for dat in data:
UpperCAmelCase_ : List[str] = f"""{dat:08b}"""
result += curr_byte
return result
except OSError:
print("File not accessible" )
sys.exit()
def __a ( __lowerCamelCase ):
UpperCAmelCase_ : int = {"0": "0", "1": "1"}
UpperCAmelCase_ : Optional[int] = "", ""
UpperCAmelCase_ : str = len(__lowerCamelCase )
for i in range(len(__lowerCamelCase ) ):
curr_string += data_bits[i]
if curr_string not in lexicon:
continue
UpperCAmelCase_ : Optional[Any] = lexicon[curr_string]
result += last_match_id
UpperCAmelCase_ : Union[str, Any] = last_match_id + "0"
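        # Once the lexicon size reaches a power of two, codes need one more bit:
        # every existing key gains a leading "0" so all keys keep the new fixed
        # width, and the next key added below starts with "1".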
        if math.log2(__lowerCamelCase ).is_integer():
UpperCAmelCase_ : Optional[int] = {}
for curr_key in list(__lowerCamelCase ):
UpperCAmelCase_ : Dict = lexicon.pop(__lowerCamelCase )
UpperCAmelCase_ : int = new_lex
UpperCAmelCase_ : List[str] = last_match_id + "1"
index += 1
UpperCAmelCase_ : str = ""
return result
def __a ( __lowerCamelCase, __lowerCamelCase ):
UpperCAmelCase_ : Optional[Any] = 8
try:
with open(__lowerCamelCase, "wb" ) as opened_file:
UpperCAmelCase_ : Optional[Any] = [
to_write[i : i + byte_length]
for i in range(0, len(__lowerCamelCase ), __lowerCamelCase )
]
if len(result_byte_array[-1] ) % byte_length == 0:
result_byte_array.append("10000000" )
else:
result_byte_array[-1] += "1" + "0" * (
byte_length - len(result_byte_array[-1] ) - 1
)
for elem in result_byte_array[:-1]:
opened_file.write(int(__lowerCamelCase, 2 ).to_bytes(1, byteorder="big" ) )
except OSError:
print("File not accessible" )
sys.exit()
def __a ( __lowerCamelCase ):
UpperCAmelCase_ : Tuple = 0
for letter in data_bits:
if letter == "1":
break
counter += 1
UpperCAmelCase_ : int = data_bits[counter:]
UpperCAmelCase_ : Optional[int] = data_bits[counter + 1 :]
return data_bits
def __a ( __lowerCamelCase, __lowerCamelCase ):
UpperCAmelCase_ : List[Any] = read_file_binary(__lowerCamelCase )
UpperCAmelCase_ : str = remove_prefix(__lowerCamelCase )
UpperCAmelCase_ : Any = decompress_data(__lowerCamelCase )
write_file_binary(__lowerCamelCase, __lowerCamelCase )
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
| 362
|
"""simple docstring"""
import random
import unittest
import torch
from diffusers import IFImgaImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class A_ (lowercase__ ,lowercase__ ,unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[str] = IFImgaImgSuperResolutionPipeline
SCREAMING_SNAKE_CASE__ : Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""width""", """height"""}
SCREAMING_SNAKE_CASE__ : List[str] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"""original_image"""} )
SCREAMING_SNAKE_CASE__ : List[Any] = PipelineTesterMixin.required_optional_params - {"""latents"""}
def UpperCamelCase__ ( self ):
"""simple docstring"""
return self._get_superresolution_dummy_components()
def UpperCamelCase__ ( self , lowercase_ , lowercase_=0 ):
"""simple docstring"""
if str(lowercase_ ).startswith("mps" ):
UpperCAmelCase_ : Optional[Any] = torch.manual_seed(lowercase_ )
else:
UpperCAmelCase_ : Union[str, Any] = torch.Generator(device=lowercase_ ).manual_seed(lowercase_ )
UpperCAmelCase_ : Any = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowercase_ ) ).to(lowercase_ )
UpperCAmelCase_ : Optional[int] = floats_tensor((1, 3, 16, 16) , rng=random.Random(lowercase_ ) ).to(lowercase_ )
UpperCAmelCase_ : int = {
"prompt": "A painting of a squirrel eating a burger",
"image": image,
"original_image": original_image,
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def UpperCamelCase__ ( self ):
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def UpperCamelCase__ ( self ):
"""simple docstring"""
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != "cuda" , reason="float16 requires CUDA" )
def UpperCamelCase__ ( self ):
"""simple docstring"""
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1E-1 )
def UpperCamelCase__ ( self ):
"""simple docstring"""
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def UpperCamelCase__ ( self ):
"""simple docstring"""
self._test_save_load_local()
def UpperCamelCase__ ( self ):
"""simple docstring"""
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
| 23
| 0
|
"""simple docstring"""
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class A_ :
'''simple docstring'''
def __init__( self , lowercase_ , lowercase_=13 , lowercase_=7 , lowercase_=True , lowercase_=True , lowercase_=False , lowercase_=True , lowercase_=99 , lowercase_=32 , lowercase_=5 , lowercase_=4 , lowercase_=37 , lowercase_="gelu" , lowercase_=0.1 , lowercase_=0.1 , lowercase_=512 , lowercase_=16 , lowercase_=2 , lowercase_=0.02 , lowercase_=3 , lowercase_=4 , lowercase_=None , ):
"""simple docstring"""
UpperCAmelCase_ : str = parent
UpperCAmelCase_ : Dict = batch_size
UpperCAmelCase_ : Optional[int] = seq_length
UpperCAmelCase_ : int = is_training
UpperCAmelCase_ : str = use_input_mask
UpperCAmelCase_ : List[str] = use_token_type_ids
UpperCAmelCase_ : Optional[int] = use_labels
UpperCAmelCase_ : Tuple = vocab_size
UpperCAmelCase_ : int = hidden_size
UpperCAmelCase_ : Optional[Any] = num_hidden_layers
UpperCAmelCase_ : Tuple = num_attention_heads
UpperCAmelCase_ : List[Any] = intermediate_size
UpperCAmelCase_ : Union[str, Any] = hidden_act
UpperCAmelCase_ : List[str] = hidden_dropout_prob
UpperCAmelCase_ : Union[str, Any] = attention_probs_dropout_prob
UpperCAmelCase_ : int = max_position_embeddings
UpperCAmelCase_ : int = type_vocab_size
UpperCAmelCase_ : Any = type_sequence_label_size
UpperCAmelCase_ : str = initializer_range
UpperCAmelCase_ : Optional[Any] = num_labels
UpperCAmelCase_ : List[str] = num_choices
UpperCAmelCase_ : int = scope
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase_ : Union[str, Any] = None
if self.use_input_mask:
UpperCAmelCase_ : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase_ : int = None
if self.use_token_type_ids:
UpperCAmelCase_ : int = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCAmelCase_ : Optional[int] = None
UpperCAmelCase_ : Union[str, Any] = None
UpperCAmelCase_ : List[Any] = None
if self.use_labels:
UpperCAmelCase_ : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase_ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase_ : str = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase_ : Any = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase__ ( self ):
"""simple docstring"""
return OpenLlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowercase_ , initializer_range=self.initializer_range , use_stable_embedding=lowercase_ , )
def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : Tuple = OpenLlamaModel(config=lowercase_ )
model.to(lowercase_ )
model.eval()
UpperCAmelCase_ : Tuple = model(lowercase_ , attention_mask=lowercase_ )
UpperCAmelCase_ : int = model(lowercase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ):
"""simple docstring"""
UpperCAmelCase_ : List[Any] = True
UpperCAmelCase_ : Optional[int] = OpenLlamaModel(lowercase_ )
model.to(lowercase_ )
model.eval()
UpperCAmelCase_ : Optional[Any] = model(
lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , encoder_attention_mask=lowercase_ , )
UpperCAmelCase_ : Optional[int] = model(
lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , )
UpperCAmelCase_ : Tuple = model(lowercase_ , attention_mask=lowercase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ):
"""simple docstring"""
UpperCAmelCase_ : str = OpenLlamaForCausalLM(config=lowercase_ )
model.to(lowercase_ )
model.eval()
UpperCAmelCase_ : List[Any] = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ):
"""simple docstring"""
UpperCAmelCase_ : List[Any] = True
UpperCAmelCase_ : Dict = True
UpperCAmelCase_ : Optional[int] = OpenLlamaForCausalLM(config=lowercase_ )
model.to(lowercase_ )
model.eval()
# first forward pass
UpperCAmelCase_ : int = model(
lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , encoder_attention_mask=lowercase_ , use_cache=lowercase_ , )
UpperCAmelCase_ : int = outputs.past_key_values
        # create hypothetical multiple next tokens and extend next_input_ids with them
UpperCAmelCase_ : Dict = ids_tensor((self.batch_size, 3) , config.vocab_size )
UpperCAmelCase_ : Tuple = ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append the new tokens to input_ids and the attention mask
UpperCAmelCase_ : Any = torch.cat([input_ids, next_tokens] , dim=-1 )
UpperCAmelCase_ : Tuple = torch.cat([input_mask, next_mask] , dim=-1 )
UpperCAmelCase_ : Union[str, Any] = model(
lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , encoder_attention_mask=lowercase_ , output_hidden_states=lowercase_ , )["hidden_states"][0]
UpperCAmelCase_ : List[Any] = model(
lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , encoder_attention_mask=lowercase_ , past_key_values=lowercase_ , output_hidden_states=lowercase_ , )["hidden_states"][0]
# select random slice
UpperCAmelCase_ : Tuple = ids_tensor((1,) , output_from_past.shape[-1] ).item()
UpperCAmelCase_ : int = output_from_no_past[:, -3:, random_slice_idx].detach()
UpperCAmelCase_ : Optional[int] = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowercase_ , lowercase_ , atol=1E-3 ) )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : List[Any] = self.prepare_config_and_inputs()
        (
            UpperCAmelCase_ ,
            UpperCAmelCase_ ,
            UpperCAmelCase_ ,
            UpperCAmelCase_ ,
            UpperCAmelCase_ ,
            UpperCAmelCase_ ,
            UpperCAmelCase_ ,
        ) = config_and_inputs  # config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
UpperCAmelCase_ : Optional[Any] = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class A_ (lowercase__ ,lowercase__ ,lowercase__ ,unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[Any] = (
(OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
)
SCREAMING_SNAKE_CASE__ : Optional[int] = (OpenLlamaForCausalLM,) if is_torch_available() else ()
SCREAMING_SNAKE_CASE__ : Any = (
{
"""feature-extraction""": OpenLlamaModel,
"""text-classification""": OpenLlamaForSequenceClassification,
"""text-generation""": OpenLlamaForCausalLM,
"""zero-shot""": OpenLlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE__ : Tuple = False
SCREAMING_SNAKE_CASE__ : List[Any] = False
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : List[str] = OpenLlamaModelTester(self )
UpperCAmelCase_ : Union[str, Any] = ConfigTester(self , config_class=lowercase_ , hidden_size=37 )
def UpperCamelCase__ ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
UpperCAmelCase_ : Optional[Any] = type
self.model_tester.create_and_check_model(*lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ : Optional[int] = 3
UpperCAmelCase_ : Optional[Any] = input_dict["input_ids"]
UpperCAmelCase_ : Union[str, Any] = input_ids.ne(1 ).to(lowercase_ )
UpperCAmelCase_ : Union[str, Any] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
UpperCAmelCase_ : str = OpenLlamaForSequenceClassification(lowercase_ )
model.to(lowercase_ )
model.eval()
UpperCAmelCase_ : int = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : str = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ : str = 3
UpperCAmelCase_ : str = "single_label_classification"
UpperCAmelCase_ : Any = input_dict["input_ids"]
UpperCAmelCase_ : int = input_ids.ne(1 ).to(lowercase_ )
UpperCAmelCase_ : Union[str, Any] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
UpperCAmelCase_ : Optional[Any] = OpenLlamaForSequenceClassification(lowercase_ )
model.to(lowercase_ )
model.eval()
UpperCAmelCase_ : List[Any] = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ : int = 3
UpperCAmelCase_ : Union[str, Any] = "multi_label_classification"
UpperCAmelCase_ : Dict = input_dict["input_ids"]
UpperCAmelCase_ : Tuple = input_ids.ne(1 ).to(lowercase_ )
UpperCAmelCase_ : Tuple = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
UpperCAmelCase_ : List[str] = OpenLlamaForSequenceClassification(lowercase_ )
model.to(lowercase_ )
model.eval()
UpperCAmelCase_ : List[Any] = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip("Open-Llama buffers include complex numbers, which breaks this test" )
def UpperCamelCase__ ( self ):
"""simple docstring"""
pass
@parameterized.expand([("linear",), ("dynamic",)] )
def UpperCamelCase__ ( self , lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ : Dict = ids_tensor([1, 10] , config.vocab_size )
UpperCAmelCase_ : List[Any] = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
UpperCAmelCase_ : Union[str, Any] = OpenLlamaModel(lowercase_ )
original_model.to(lowercase_ )
original_model.eval()
UpperCAmelCase_ : Optional[int] = original_model(lowercase_ ).last_hidden_state
UpperCAmelCase_ : Optional[int] = original_model(lowercase_ ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
UpperCAmelCase_ : Optional[int] = {"type": scaling_type, "factor": 10.0}
UpperCAmelCase_ : List[Any] = OpenLlamaModel(lowercase_ )
scaled_model.to(lowercase_ )
scaled_model.eval()
UpperCAmelCase_ : Tuple = scaled_model(lowercase_ ).last_hidden_state
UpperCAmelCase_ : Any = scaled_model(lowercase_ ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(lowercase_ , lowercase_ , atol=1E-5 ) )
else:
self.assertFalse(torch.allclose(lowercase_ , lowercase_ , atol=1E-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(lowercase_ , lowercase_ , atol=1E-5 ) )
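        # For reference (our summary, not asserted by the test): "linear" divides
        # position indices by `factor` before computing rotary angles, so even short
        # inputs change; "dynamic" (NTK-style) rescales the rotary base only once the
        # sequence exceeds the original max_position_embeddings.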
| 363
|
"""simple docstring"""
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class A_ (unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = "ylacombe/bark-small"
UpperCAmelCase_ : Union[str, Any] = tempfile.mkdtemp()
UpperCAmelCase_ : List[str] = "en_speaker_1"
UpperCAmelCase_ : Tuple = "This is a test string"
UpperCAmelCase_ : List[Any] = "speaker_embeddings_path.json"
UpperCAmelCase_ : Any = "speaker_embeddings"
def UpperCamelCase__ ( self , **lowercase_ ):
"""simple docstring"""
return AutoTokenizer.from_pretrained(self.checkpoint , **lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Tuple = self.get_tokenizer()
UpperCAmelCase_ : Union[str, Any] = BarkProcessor(tokenizer=lowercase_ )
processor.save_pretrained(self.tmpdirname )
UpperCAmelCase_ : Optional[int] = BarkProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
processor.save_pretrained(
self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
UpperCAmelCase_ : Dict = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
UpperCAmelCase_ : Union[str, Any] = BarkProcessor.from_pretrained(
self.tmpdirname , self.speaker_embeddings_dict_path , bos_token="(BOS)" , eos_token="(EOS)" , )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Dict = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
UpperCAmelCase_ : int = 35
UpperCAmelCase_ : Optional[Any] = 2
UpperCAmelCase_ : List[Any] = 8
UpperCAmelCase_ : Optional[Any] = {
"semantic_prompt": np.ones(lowercase_ ),
"coarse_prompt": np.ones((nb_codebooks_coarse, seq_len) ),
"fine_prompt": np.ones((nb_codebooks_total, seq_len) ),
}
# test providing already loaded voice_preset
UpperCAmelCase_ : Dict = processor(text=self.input_string , voice_preset=lowercase_ )
UpperCAmelCase_ : List[str] = inputs["history_prompt"]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(lowercase_ , np.array([] ) ).tolist() )
# test loading voice preset from npz file
UpperCAmelCase_ : Tuple = os.path.join(self.tmpdirname , "file.npz" )
np.savez(lowercase_ , **lowercase_ )
UpperCAmelCase_ : Optional[int] = processor(text=self.input_string , voice_preset=lowercase_ )
UpperCAmelCase_ : List[str] = inputs["history_prompt"]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(lowercase_ , np.array([] ) ).tolist() )
# test loading voice preset from the hub
UpperCAmelCase_ : Tuple = processor(text=self.input_string , voice_preset=self.voice_preset )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = self.get_tokenizer()
UpperCAmelCase_ : Optional[Any] = BarkProcessor(tokenizer=lowercase_ )
UpperCAmelCase_ : Tuple = processor(text=self.input_string )
UpperCAmelCase_ : Union[str, Any] = tokenizer(
self.input_string , padding="max_length" , max_length=256 , add_special_tokens=lowercase_ , return_attention_mask=lowercase_ , return_token_type_ids=lowercase_ , )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
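# Self-contained sketch of the voice-preset layout accepted above; the shapes
# mirror the test constants (sequence length, coarse and fine codebooks) and
# the helper name is an assumption.
def _make_dummy_voice_preset(seq_len=35, nb_codebooks_coarse=2, nb_codebooks_total=8):
    return {
        "semantic_prompt": np.ones(seq_len),
        "coarse_prompt": np.ones((nb_codebooks_coarse, seq_len)),
        "fine_prompt": np.ones((nb_codebooks_total, seq_len)),
    }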
| 23
| 0
|
"""simple docstring"""
import warnings
from ...utils import is_sklearn_available, requires_backends
if is_sklearn_available():
from scipy.stats import pearsonr, spearmanr
    from sklearn.metrics import f1_score, matthews_corrcoef
_a = (
'This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate '
'library. You can have a look at this example script for pointers: '
'https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py'
)
def __a ( __lowerCamelCase, __lowerCamelCase ):
warnings.warn(__lowerCamelCase, __lowerCamelCase )
requires_backends(__lowerCamelCase, "sklearn" )
return (preds == labels).mean()
def __a ( __lowerCamelCase, __lowerCamelCase ):
warnings.warn(__lowerCamelCase, __lowerCamelCase )
requires_backends(__lowerCamelCase, "sklearn" )
    UpperCAmelCase_ : Optional[Any] = simple_accuracy(__lowerCamelCase, __lowerCamelCase )
    UpperCAmelCase_ : List[Any] = f1_score(y_true=__lowerCamelCase, y_pred=__lowerCamelCase )
    return {
        "acc": acc,
        "f1": f1,
        "acc_and_f1": (acc + f1) / 2,
    }
def __a ( __lowerCamelCase, __lowerCamelCase ):
warnings.warn(__lowerCamelCase, __lowerCamelCase )
requires_backends(__lowerCamelCase, "sklearn" )
UpperCAmelCase_ : Any = pearsonr(__lowerCamelCase, __lowerCamelCase )[0]
UpperCAmelCase_ : Optional[Any] = spearmanr(__lowerCamelCase, __lowerCamelCase )[0]
return {
"pearson": pearson_corr,
"spearmanr": spearman_corr,
"corr": (pearson_corr + spearman_corr) / 2,
}
def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
warnings.warn(__lowerCamelCase, __lowerCamelCase )
requires_backends(__lowerCamelCase, "sklearn" )
assert len(__lowerCamelCase ) == len(__lowerCamelCase ), f"""Predictions and labels have mismatched lengths {len(__lowerCamelCase )} and {len(__lowerCamelCase )}"""
if task_name == "cola":
return {"mcc": matthews_corrcoef(__lowerCamelCase, __lowerCamelCase )}
elif task_name == "sst-2":
return {"acc": simple_accuracy(__lowerCamelCase, __lowerCamelCase )}
elif task_name == "mrpc":
return acc_and_fa(__lowerCamelCase, __lowerCamelCase )
elif task_name == "sts-b":
return pearson_and_spearman(__lowerCamelCase, __lowerCamelCase )
elif task_name == "qqp":
return acc_and_fa(__lowerCamelCase, __lowerCamelCase )
elif task_name == "mnli":
return {"mnli/acc": simple_accuracy(__lowerCamelCase, __lowerCamelCase )}
elif task_name == "mnli-mm":
return {"mnli-mm/acc": simple_accuracy(__lowerCamelCase, __lowerCamelCase )}
elif task_name == "qnli":
return {"acc": simple_accuracy(__lowerCamelCase, __lowerCamelCase )}
elif task_name == "rte":
return {"acc": simple_accuracy(__lowerCamelCase, __lowerCamelCase )}
elif task_name == "wnli":
return {"acc": simple_accuracy(__lowerCamelCase, __lowerCamelCase )}
elif task_name == "hans":
return {"acc": simple_accuracy(__lowerCamelCase, __lowerCamelCase )}
else:
raise KeyError(__lowerCamelCase )
def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
warnings.warn(__lowerCamelCase, __lowerCamelCase )
requires_backends(__lowerCamelCase, "sklearn" )
if len(__lowerCamelCase ) != len(__lowerCamelCase ):
raise ValueError(f"""Predictions and labels have mismatched lengths {len(__lowerCamelCase )} and {len(__lowerCamelCase )}""" )
if task_name == "xnli":
return {"acc": simple_accuracy(__lowerCamelCase, __lowerCamelCase )}
else:
raise KeyError(__lowerCamelCase )
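# Toy check of the "acc_and_f1" combination computed above (sklearn assumed to
# be installed); the arrays are illustrative, not GLUE predictions.
# >>> import numpy as np
# >>> preds, labels = np.array([1, 0, 1, 1]), np.array([1, 1, 1, 0])
# >>> acc = (preds == labels).mean()              # 0.5
# >>> f1 = f1_score(y_true=labels, y_pred=preds)  # 0.666...
# >>> (acc + f1) / 2                              # 0.583...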
| 364
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_a = logging.get_logger(__name__)
def __a ( __lowerCamelCase, __lowerCamelCase=False ):
UpperCAmelCase_ : Optional[int] = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"""blocks.{i}.norm1.weight""", f"""deit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((f"""blocks.{i}.norm1.bias""", f"""deit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((f"""blocks.{i}.attn.proj.weight""", f"""deit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.attn.proj.bias""", f"""deit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((f"""blocks.{i}.norm2.weight""", f"""deit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((f"""blocks.{i}.norm2.bias""", f"""deit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.weight""", f"""deit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.bias""", f"""deit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.weight""", f"""deit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.bias""", f"""deit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
("cls_token", "deit.embeddings.cls_token"),
("dist_token", "deit.embeddings.distillation_token"),
("patch_embed.proj.weight", "deit.embeddings.patch_embeddings.projection.weight"),
("patch_embed.proj.bias", "deit.embeddings.patch_embeddings.projection.bias"),
("pos_embed", "deit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
("pre_logits.fc.weight", "pooler.dense.weight"),
("pre_logits.fc.bias", "pooler.dense.bias"),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
UpperCAmelCase_ : Dict = [(pair[0], pair[1][4:]) if pair[1].startswith("deit" ) else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
("norm.weight", "deit.layernorm.weight"),
("norm.bias", "deit.layernorm.bias"),
("head.weight", "cls_classifier.weight"),
("head.bias", "cls_classifier.bias"),
("head_dist.weight", "distillation_classifier.weight"),
("head_dist.bias", "distillation_classifier.bias"),
] )
return rename_keys
def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase=False ):
for i in range(config.num_hidden_layers ):
if base_model:
UpperCAmelCase_ : int = ""
else:
UpperCAmelCase_ : Union[str, Any] = "deit."
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
UpperCAmelCase_ : Tuple = state_dict.pop(f"""blocks.{i}.attn.qkv.weight""" )
UpperCAmelCase_ : Dict = state_dict.pop(f"""blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
UpperCAmelCase_ : Union[str, Any] = in_proj_weight[
: config.hidden_size, :
]
UpperCAmelCase_ : Any = in_proj_bias[: config.hidden_size]
UpperCAmelCase_ : Optional[Any] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
UpperCAmelCase_ : Dict = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
UpperCAmelCase_ : List[Any] = in_proj_weight[
-config.hidden_size :, :
]
UpperCAmelCase_ : str = in_proj_bias[-config.hidden_size :]
def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
UpperCAmelCase_ : Tuple = dct.pop(__lowerCamelCase )
UpperCAmelCase_ : Tuple = val
def __a ( ):
UpperCAmelCase_ : Union[str, Any] = "http://images.cocodataset.org/val2017/000000039769.jpg"
UpperCAmelCase_ : str = Image.open(requests.get(__lowerCamelCase, stream=__lowerCamelCase ).raw )
return im
@torch.no_grad()
def __a ( __lowerCamelCase, __lowerCamelCase ):
UpperCAmelCase_ : List[str] = DeiTConfig()
# all deit models have fine-tuned heads
UpperCAmelCase_ : Optional[int] = False
# dataset (fine-tuned on ImageNet 2012), patch_size and image_size
UpperCAmelCase_ : Tuple = 1000
UpperCAmelCase_ : str = "huggingface/label-files"
UpperCAmelCase_ : str = "imagenet-1k-id2label.json"
UpperCAmelCase_ : List[Any] = json.load(open(hf_hub_download(__lowerCamelCase, __lowerCamelCase, repo_type="dataset" ), "r" ) )
    UpperCAmelCase_ : List[str] = {int(k ): v for k, v in id2label.items()}
    UpperCAmelCase_ : Any = id2label
    UpperCAmelCase_ : int = {v: k for k, v in id2label.items()}
UpperCAmelCase_ : Any = int(deit_name[-6:-4] )
UpperCAmelCase_ : Dict = int(deit_name[-3:] )
# size of the architecture
if deit_name[9:].startswith("tiny" ):
UpperCAmelCase_ : Any = 192
UpperCAmelCase_ : Union[str, Any] = 768
UpperCAmelCase_ : Union[str, Any] = 12
UpperCAmelCase_ : int = 3
elif deit_name[9:].startswith("small" ):
UpperCAmelCase_ : List[str] = 384
UpperCAmelCase_ : List[str] = 1536
UpperCAmelCase_ : Dict = 12
UpperCAmelCase_ : Any = 6
if deit_name[9:].startswith("base" ):
pass
elif deit_name[4:].startswith("large" ):
UpperCAmelCase_ : int = 1024
UpperCAmelCase_ : List[Any] = 4096
UpperCAmelCase_ : Optional[int] = 24
UpperCAmelCase_ : int = 16
# load original model from timm
UpperCAmelCase_ : Union[str, Any] = timm.create_model(__lowerCamelCase, pretrained=__lowerCamelCase )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
UpperCAmelCase_ : Optional[Any] = timm_model.state_dict()
UpperCAmelCase_ : Tuple = create_rename_keys(__lowerCamelCase, __lowerCamelCase )
for src, dest in rename_keys:
rename_key(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase )
read_in_q_k_v(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase )
# load HuggingFace model
UpperCAmelCase_ : str = DeiTForImageClassificationWithTeacher(__lowerCamelCase ).eval()
model.load_state_dict(__lowerCamelCase )
# Check outputs on an image, prepared by DeiTImageProcessor
UpperCAmelCase_ : Union[str, Any] = int(
(256 / 224) * config.image_size ) # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
UpperCAmelCase_ : Optional[Any] = DeiTImageProcessor(size=__lowerCamelCase, crop_size=config.image_size )
UpperCAmelCase_ : Any = image_processor(images=prepare_img(), return_tensors="pt" )
UpperCAmelCase_ : int = encoding["pixel_values"]
UpperCAmelCase_ : Optional[Any] = model(__lowerCamelCase )
UpperCAmelCase_ : Any = timm_model(__lowerCamelCase )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(__lowerCamelCase, outputs.logits, atol=1E-3 )
Path(__lowerCamelCase ).mkdir(exist_ok=__lowerCamelCase )
print(f"""Saving model {deit_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(__lowerCamelCase )
print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(__lowerCamelCase )
if __name__ == "__main__":
_a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--deit_name',
default='vit_deit_base_distilled_patch16_224',
type=str,
help='Name of the DeiT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
_a = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
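# Example invocation of the converter above (paths and folder names are
# illustrative):
#   python convert_deit_checkpoint.py \
#       --deit_name vit_deit_base_distilled_patch16_224 \
#       --pytorch_dump_folder_path ./deit-base-distilled
# The dump folder can then be reloaded with
# DeiTForImageClassificationWithTeacher.from_pretrained("./deit-base-distilled").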
| 23
| 0
|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
_a = logging.get_logger(__name__)
class A_ (lowercase__ ):
'''simple docstring'''
def __init__( self , *lowercase_ , **lowercase_ ):
"""simple docstring"""
warnings.warn(
"The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
" Please use PoolFormerImageProcessor instead." , lowercase_ , )
super().__init__(*lowercase_ , **lowercase_ )
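# Migration sketch for the deprecation above; the checkpoint name is illustrative:
# from transformers import PoolFormerImageProcessor
# image_processor = PoolFormerImageProcessor.from_pretrained("sail/poolformer_s12")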
| 365
|
"""simple docstring"""
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class A_ (unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase__ ( self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
# pipeline has Flax weights
UpperCAmelCase_ : Optional[Any] = FlaxDiffusionPipeline.from_pretrained(
"hf-internal-testing/tiny-stable-diffusion-pipe" , safety_checker=lowercase_ , cache_dir=lowercase_ )
UpperCAmelCase_ : List[Any] = [t[-1] for t in os.walk(os.path.join(lowercase_ , os.listdir(lowercase_ )[0] , "snapshots" ) )]
UpperCAmelCase_ : Dict = [item for sublist in all_root_files for item in sublist]
# None of the downloaded files should be a PyTorch file even if we have some here:
# https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
assert not any(f.endswith(".bin" ) for f in files )
@slow
@require_flax
class A_ (unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = FlaxStableDiffusionPipeline.from_pretrained(
"hf-internal-testing/tiny-stable-diffusion-pipe" , safety_checker=lowercase_ )
UpperCAmelCase_ : Tuple = (
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
UpperCAmelCase_ : List[Any] = jax.random.PRNGKey(0 )
UpperCAmelCase_ : List[str] = 4
UpperCAmelCase_ : Tuple = jax.device_count()
UpperCAmelCase_ : Optional[int] = num_samples * [prompt]
UpperCAmelCase_ : List[Any] = pipeline.prepare_inputs(lowercase_ )
# shard inputs and rng
UpperCAmelCase_ : int = replicate(lowercase_ )
UpperCAmelCase_ : str = jax.random.split(lowercase_ , lowercase_ )
UpperCAmelCase_ : List[str] = shard(lowercase_ )
UpperCAmelCase_ : Dict = pipeline(lowercase_ , lowercase_ , lowercase_ , lowercase_ , jit=lowercase_ ).images
assert images.shape == (num_samples, 1, 64, 64, 3)
if jax.device_count() == 8:
            assert np.abs(np.abs(images[0, 0, :2, :2, -2:] , dtype=np.float32 ).sum() - 4.1514745 ) < 1E-3
            assert np.abs(np.abs(lowercase_ , dtype=np.float32 ).sum() - 49947.875 ) < 5E-1
UpperCAmelCase_ : List[Any] = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) )
assert len(lowercase_ ) == num_samples
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = FlaxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4" , revision="flax" , safety_checker=lowercase_ )
UpperCAmelCase_ : Optional[int] = (
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
UpperCAmelCase_ : str = jax.random.PRNGKey(0 )
UpperCAmelCase_ : Union[str, Any] = 50
UpperCAmelCase_ : List[str] = jax.device_count()
UpperCAmelCase_ : List[str] = num_samples * [prompt]
UpperCAmelCase_ : Union[str, Any] = pipeline.prepare_inputs(lowercase_ )
# shard inputs and rng
UpperCAmelCase_ : Any = replicate(lowercase_ )
UpperCAmelCase_ : List[str] = jax.random.split(lowercase_ , lowercase_ )
UpperCAmelCase_ : List[str] = shard(lowercase_ )
UpperCAmelCase_ : int = pipeline(lowercase_ , lowercase_ , lowercase_ , lowercase_ , jit=lowercase_ ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.float32 ).sum() - 0.05652401) ) < 1E-3
            assert np.abs((np.abs(lowercase_ , dtype=np.float32 ).sum() - 2383808.2) ) < 5E-1
def UpperCamelCase__ ( self ):
"""simple docstring"""
        UpperCAmelCase_ , UpperCAmelCase_ : int = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloat16 , safety_checker=lowercase_ )
UpperCAmelCase_ : Any = (
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
UpperCAmelCase_ : str = jax.random.PRNGKey(0 )
UpperCAmelCase_ : str = 50
UpperCAmelCase_ : List[str] = jax.device_count()
UpperCAmelCase_ : List[Any] = num_samples * [prompt]
UpperCAmelCase_ : Any = pipeline.prepare_inputs(lowercase_ )
# shard inputs and rng
UpperCAmelCase_ : Dict = replicate(lowercase_ )
UpperCAmelCase_ : str = jax.random.split(lowercase_ , lowercase_ )
UpperCAmelCase_ : Union[str, Any] = shard(lowercase_ )
UpperCAmelCase_ : List[Any] = pipeline(lowercase_ , lowercase_ , lowercase_ , lowercase_ , jit=lowercase_ ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.float32 ).sum() - 0.04003906) ) < 1E-3
            assert np.abs((np.abs(lowercase_ , dtype=np.float32 ).sum() - 2373516.75) ) < 5E-1
def UpperCamelCase__ ( self ):
"""simple docstring"""
        UpperCAmelCase_ , UpperCAmelCase_ : str = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloat16 )
UpperCAmelCase_ : List[Any] = (
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
UpperCAmelCase_ : Dict = jax.random.PRNGKey(0 )
UpperCAmelCase_ : Optional[int] = 50
UpperCAmelCase_ : Optional[int] = jax.device_count()
UpperCAmelCase_ : str = num_samples * [prompt]
UpperCAmelCase_ : int = pipeline.prepare_inputs(lowercase_ )
# shard inputs and rng
UpperCAmelCase_ : Union[str, Any] = replicate(lowercase_ )
UpperCAmelCase_ : Union[str, Any] = jax.random.split(lowercase_ , lowercase_ )
UpperCAmelCase_ : Optional[int] = shard(lowercase_ )
UpperCAmelCase_ : Any = pipeline(lowercase_ , lowercase_ , lowercase_ , lowercase_ , jit=lowercase_ ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.float32 ).sum() - 0.04003906) ) < 1E-3
            assert np.abs((np.abs(lowercase_ , dtype=np.float32 ).sum() - 2373516.75) ) < 5E-1
def UpperCamelCase__ ( self ):
"""simple docstring"""
        UpperCAmelCase_ : List[Any] = FlaxDDIMScheduler(
            beta_start=0.00085 , beta_end=0.012 , beta_schedule="scaled_linear" , set_alpha_to_one=lowercase_ , steps_offset=1 , )
        UpperCAmelCase_ , UpperCAmelCase_ : int = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloat16 , scheduler=lowercase_ , safety_checker=lowercase_ , )
UpperCAmelCase_ : List[Any] = scheduler.create_state()
UpperCAmelCase_ : int = scheduler_state
UpperCAmelCase_ : Union[str, Any] = (
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
UpperCAmelCase_ : Optional[Any] = jax.random.PRNGKey(0 )
UpperCAmelCase_ : int = 50
UpperCAmelCase_ : str = jax.device_count()
UpperCAmelCase_ : List[Any] = num_samples * [prompt]
UpperCAmelCase_ : int = pipeline.prepare_inputs(lowercase_ )
# shard inputs and rng
UpperCAmelCase_ : int = replicate(lowercase_ )
UpperCAmelCase_ : List[str] = jax.random.split(lowercase_ , lowercase_ )
UpperCAmelCase_ : Optional[Any] = shard(lowercase_ )
UpperCAmelCase_ : Any = pipeline(lowercase_ , lowercase_ , lowercase_ , lowercase_ , jit=lowercase_ ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.float32 ).sum() - 0.045043945) ) < 1E-3
            assert np.abs((np.abs(lowercase_ , dtype=np.float32 ).sum() - 2347693.5) ) < 5E-1
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : int = (
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
UpperCAmelCase_ : List[str] = jax.device_count()
UpperCAmelCase_ : List[Any] = num_samples * [prompt]
UpperCAmelCase_ : Union[str, Any] = jax.random.split(jax.random.PRNGKey(0 ) , lowercase_ )
        UpperCAmelCase_ , UpperCAmelCase_ : str = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloat16 , safety_checker=lowercase_ , )
UpperCAmelCase_ : Any = replicate(lowercase_ )
UpperCAmelCase_ : List[str] = pipeline.prepare_inputs(lowercase_ )
UpperCAmelCase_ : List[str] = shard(lowercase_ )
UpperCAmelCase_ : List[Any] = pipeline(lowercase_ , lowercase_ , lowercase_ , jit=lowercase_ ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
UpperCAmelCase_ : int = images[2, 0, 256, 10:17, 1]
# With memory efficient attention
        UpperCAmelCase_ , UpperCAmelCase_ : int = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloat16 , safety_checker=lowercase_ , use_memory_efficient_attention=lowercase_ , )
UpperCAmelCase_ : str = replicate(lowercase_ )
UpperCAmelCase_ : str = pipeline.prepare_inputs(lowercase_ )
UpperCAmelCase_ : Optional[int] = shard(lowercase_ )
UpperCAmelCase_ : str = pipeline(lowercase_ , lowercase_ , lowercase_ , jit=lowercase_ ).images
assert images_eff.shape == (num_samples, 1, 512, 512, 3)
UpperCAmelCase_ : Optional[int] = images[2, 0, 256, 10:17, 1]
# I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
# over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
assert abs(slice_eff - slice ).max() < 1E-2
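# Standalone sketch of the replicate/split/shard pattern used throughout the
# tests above, on toy arrays so it runs without model weights (device count is
# typically 1 on CPU); the helper name is an assumption.
def _sharding_demo():
    num_devices = jax.device_count()
    rngs = jax.random.split(jax.random.PRNGKey(0), num_devices)  # one RNG per device
    batch = jnp.ones((num_devices * 2, 8))  # leading dim must be divisible by device count
    # shard() reshapes the leading dim to (num_devices, per_device_batch, ...)
    return shard(batch).shape, rngs.shape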
| 23
| 0
|
"""simple docstring"""
from __future__ import annotations
from cmath import sqrt
def quadratic_roots(a, b, c ):
    if a == 0:
        raise ValueError("Coefficient 'a' must not be zero." )
    delta = b * b - 4 * a * c
    root_1 = (-b + sqrt(delta )) / (2 * a)
    root_2 = (-b - sqrt(delta )) / (2 * a)
    return (
        root_1.real if not root_1.imag else root_1,
        root_2.real if not root_2.imag else root_2,
    )
def main():
    solution_1 , solution_2 = quadratic_roots(a=5, b=6, c=1 )
    print(f"""The solutions are: {solution_1} and {solution_2}""" )
if __name__ == "__main__":
main()
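# Worked checks of quadratic_roots above: x**2 - 3x + 2 factors as
# (x - 1)(x - 2), and a negative discriminant yields a conjugate pair.
# >>> quadratic_roots(1, -3, 2)
# (2.0, 1.0)
# >>> quadratic_roots(1, 0, 1)
# (1j, -1j)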
| 366
|
"""simple docstring"""
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
_a = 0
_a = [
[0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0s are free cells, 1s are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
_a = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
_a = tuple[int, int]
class A_ :
'''simple docstring'''
def __init__( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ):
"""simple docstring"""
UpperCAmelCase_ : int = pos_x
UpperCAmelCase_ : List[Any] = pos_y
UpperCAmelCase_ : Union[str, Any] = (pos_y, pos_x)
UpperCAmelCase_ : Any = goal_x
UpperCAmelCase_ : Dict = goal_y
UpperCAmelCase_ : Any = g_cost
UpperCAmelCase_ : List[str] = parent
UpperCAmelCase_ : int = self.calculate_heuristic()
UpperCAmelCase_ : Any = self.g_cost + self.h_cost
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Tuple = self.pos_x - self.goal_x
UpperCAmelCase_ : Union[str, Any] = self.pos_y - self.goal_y
if HEURISTIC == 1:
return abs(lowercase_ ) + abs(lowercase_ )
else:
return sqrt(dy**2 + dx**2 )
def __lt__( self , lowercase_ ):
"""simple docstring"""
return self.f_cost < other.f_cost
class A_ :
'''simple docstring'''
def __init__( self , lowercase_ , lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : Tuple = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , lowercase_ )
        UpperCAmelCase_ : List[Any] = Node(goal[1] , goal[0] , goal[1] , goal[0] , 99999 , lowercase_ )
UpperCAmelCase_ : str = [self.start]
UpperCAmelCase_ : list[Node] = []
UpperCAmelCase_ : int = False
def UpperCamelCase__ ( self ):
"""simple docstring"""
while self.open_nodes:
# Open Nodes are sorted using __lt__
self.open_nodes.sort()
UpperCAmelCase_ : List[str] = self.open_nodes.pop(0 )
if current_node.pos == self.target.pos:
return self.retrace_path(lowercase_ )
self.closed_nodes.append(lowercase_ )
UpperCAmelCase_ : str = self.get_successors(lowercase_ )
for child_node in successors:
if child_node in self.closed_nodes:
continue
if child_node not in self.open_nodes:
self.open_nodes.append(lowercase_ )
else:
# retrieve the best current path
UpperCAmelCase_ : Union[str, Any] = self.open_nodes.pop(self.open_nodes.index(lowercase_ ) )
if child_node.g_cost < better_node.g_cost:
self.open_nodes.append(lowercase_ )
else:
self.open_nodes.append(lowercase_ )
return [self.start.pos]
def UpperCamelCase__ ( self , lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : Any = []
for action in delta:
UpperCAmelCase_ : str = parent.pos_x + action[1]
UpperCAmelCase_ : int = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(lowercase_ ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(
lowercase_ , lowercase_ , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , lowercase_ , ) )
return successors
def UpperCamelCase__ ( self , lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = node
UpperCAmelCase_ : int = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
UpperCAmelCase_ : Optional[int] = current_node.parent
path.reverse()
return path
class A_ :
'''simple docstring'''
def __init__( self , lowercase_ , lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : Dict = AStar(lowercase_ , lowercase_ )
UpperCAmelCase_ : Optional[Any] = AStar(lowercase_ , lowercase_ )
UpperCAmelCase_ : Tuple = False
def UpperCamelCase__ ( self ):
"""simple docstring"""
while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
self.fwd_astar.open_nodes.sort()
self.bwd_astar.open_nodes.sort()
UpperCAmelCase_ : List[str] = self.fwd_astar.open_nodes.pop(0 )
UpperCAmelCase_ : List[Any] = self.bwd_astar.open_nodes.pop(0 )
if current_bwd_node.pos == current_fwd_node.pos:
return self.retrace_bidirectional_path(
lowercase_ , lowercase_ )
self.fwd_astar.closed_nodes.append(lowercase_ )
self.bwd_astar.closed_nodes.append(lowercase_ )
UpperCAmelCase_ : Tuple = current_bwd_node
UpperCAmelCase_ : str = current_fwd_node
UpperCAmelCase_ : Dict = {
self.fwd_astar: self.fwd_astar.get_successors(lowercase_ ),
self.bwd_astar: self.bwd_astar.get_successors(lowercase_ ),
}
for astar in [self.fwd_astar, self.bwd_astar]:
for child_node in successors[astar]:
if child_node in astar.closed_nodes:
continue
if child_node not in astar.open_nodes:
astar.open_nodes.append(lowercase_ )
else:
# retrieve the best current path
UpperCAmelCase_ : List[Any] = astar.open_nodes.pop(
astar.open_nodes.index(lowercase_ ) )
if child_node.g_cost < better_node.g_cost:
astar.open_nodes.append(lowercase_ )
else:
astar.open_nodes.append(lowercase_ )
return [self.fwd_astar.start.pos]
def UpperCamelCase__ ( self , lowercase_ , lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : List[Any] = self.fwd_astar.retrace_path(lowercase_ )
UpperCAmelCase_ : int = self.bwd_astar.retrace_path(lowercase_ )
bwd_path.pop()
bwd_path.reverse()
UpperCAmelCase_ : Any = fwd_path + bwd_path
return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
_a = (0, 0)
_a = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
_a = time.time()
_a = AStar(init, goal)
_a = a_star.search()
_a = time.time() - start_time
print(f"""AStar execution time = {end_time:f} seconds""")
_a = time.time()
_a = BidirectionalAStar(init, goal)
_a = time.time() - bd_start_time
print(f"""BidirectionalAStar execution time = {bd_end_time:f} seconds""")
| 23
| 0
|
"""simple docstring"""
import cmath
import math
def apparent_power(voltage, current, voltage_angle, current_angle ):
    voltage_angle = math.radians(voltage_angle )
    current_angle = math.radians(current_angle )
    # Convert voltage and current to rectangular (complex) form
    voltage_rect = cmath.rect(voltage, voltage_angle )
    current_rect = cmath.rect(current, current_angle )
    # Calculate apparent power
    return voltage_rect * current_rect
if __name__ == "__main__":
import doctest
doctest.testmod()
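# Worked example for apparent_power above: 100 V and 5 A, both at 0 degrees,
# give a purely real apparent power.
# >>> apparent_power(100, 5, 0, 0)
# (500+0j)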
| 367
|
"""simple docstring"""
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class A_ (lowercase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = (PNDMScheduler,)
SCREAMING_SNAKE_CASE__ : str = (("""num_inference_steps""", 50),)
def UpperCamelCase__ ( self , **lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : int = {
"num_train_timesteps": 1000,
"beta_start": 0.00_01,
"beta_end": 0.02,
"beta_schedule": "linear",
}
config.update(**lowercase_ )
return config
def UpperCamelCase__ ( self , lowercase_=0 , **lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : str = dict(self.forward_default_kwargs )
UpperCAmelCase_ : List[str] = kwargs.pop("num_inference_steps" , lowercase_ )
UpperCAmelCase_ : Union[str, Any] = self.dummy_sample
UpperCAmelCase_ : Dict = 0.1 * sample
UpperCAmelCase_ : Dict = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
UpperCAmelCase_ : List[Any] = self.get_scheduler_config(**lowercase_ )
UpperCAmelCase_ : Dict = scheduler_class(**lowercase_ )
scheduler.set_timesteps(lowercase_ )
# copy over dummy past residuals
UpperCAmelCase_ : List[Any] = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowercase_ )
UpperCAmelCase_ : Optional[int] = scheduler_class.from_pretrained(lowercase_ )
new_scheduler.set_timesteps(lowercase_ )
# copy over dummy past residuals
UpperCAmelCase_ : int = dummy_past_residuals[:]
UpperCAmelCase_ : List[str] = scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
UpperCAmelCase_ : str = new_scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
UpperCAmelCase_ : Optional[int] = scheduler.step_plms(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
UpperCAmelCase_ : Dict = new_scheduler.step_plms(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def UpperCamelCase__ ( self ):
"""simple docstring"""
pass
def UpperCamelCase__ ( self , lowercase_=0 , **lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : List[Any] = dict(self.forward_default_kwargs )
UpperCAmelCase_ : str = kwargs.pop("num_inference_steps" , lowercase_ )
UpperCAmelCase_ : Optional[int] = self.dummy_sample
UpperCAmelCase_ : List[str] = 0.1 * sample
UpperCAmelCase_ : Tuple = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
UpperCAmelCase_ : str = self.get_scheduler_config()
UpperCAmelCase_ : Dict = scheduler_class(**lowercase_ )
scheduler.set_timesteps(lowercase_ )
# copy over dummy past residuals (must be after setting timesteps)
UpperCAmelCase_ : List[Any] = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowercase_ )
UpperCAmelCase_ : Dict = scheduler_class.from_pretrained(lowercase_ )
# copy over dummy past residuals
new_scheduler.set_timesteps(lowercase_ )
# copy over dummy past residual (must be after setting timesteps)
UpperCAmelCase_ : Optional[Any] = dummy_past_residuals[:]
UpperCAmelCase_ : Union[str, Any] = scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
UpperCAmelCase_ : Dict = new_scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
UpperCAmelCase_ : List[str] = scheduler.step_plms(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
UpperCAmelCase_ : int = new_scheduler.step_plms(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def UpperCamelCase__ ( self , **lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : str = self.scheduler_classes[0]
UpperCAmelCase_ : Union[str, Any] = self.get_scheduler_config(**lowercase_ )
UpperCAmelCase_ : List[Any] = scheduler_class(**lowercase_ )
UpperCAmelCase_ : Tuple = 10
UpperCAmelCase_ : List[str] = self.dummy_model()
UpperCAmelCase_ : str = self.dummy_sample_deter
scheduler.set_timesteps(lowercase_ )
for i, t in enumerate(scheduler.prk_timesteps ):
UpperCAmelCase_ : Tuple = model(lowercase_ , lowercase_ )
UpperCAmelCase_ : Optional[int] = scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ ).prev_sample
for i, t in enumerate(scheduler.plms_timesteps ):
UpperCAmelCase_ : Any = model(lowercase_ , lowercase_ )
UpperCAmelCase_ : Optional[Any] = scheduler.step_plms(lowercase_ , lowercase_ , lowercase_ ).prev_sample
return sample
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : str = dict(self.forward_default_kwargs )
UpperCAmelCase_ : Optional[Any] = kwargs.pop("num_inference_steps" , lowercase_ )
for scheduler_class in self.scheduler_classes:
UpperCAmelCase_ : Any = self.get_scheduler_config()
UpperCAmelCase_ : Tuple = scheduler_class(**lowercase_ )
UpperCAmelCase_ : str = self.dummy_sample
UpperCAmelCase_ : List[Any] = 0.1 * sample
if num_inference_steps is not None and hasattr(lowercase_ , "set_timesteps" ):
scheduler.set_timesteps(lowercase_ )
elif num_inference_steps is not None and not hasattr(lowercase_ , "set_timesteps" ):
UpperCAmelCase_ : List[Any] = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
UpperCAmelCase_ : List[str] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
UpperCAmelCase_ : List[str] = dummy_past_residuals[:]
UpperCAmelCase_ : str = scheduler.step_prk(lowercase_ , 0 , lowercase_ , **lowercase_ ).prev_sample
UpperCAmelCase_ : Any = scheduler.step_prk(lowercase_ , 1 , lowercase_ , **lowercase_ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
UpperCAmelCase_ : Optional[Any] = scheduler.step_plms(lowercase_ , 0 , lowercase_ , **lowercase_ ).prev_sample
UpperCAmelCase_ : Optional[Any] = scheduler.step_plms(lowercase_ , 1 , lowercase_ , **lowercase_ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def UpperCamelCase__ ( self ):
"""simple docstring"""
for timesteps in [100, 1000]:
self.check_over_configs(num_train_timesteps=lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=lowercase_ )
UpperCAmelCase_ : Optional[int] = self.scheduler_classes[0]
UpperCAmelCase_ : int = self.get_scheduler_config(steps_offset=1 )
UpperCAmelCase_ : Optional[Any] = scheduler_class(**lowercase_ )
scheduler.set_timesteps(10 )
assert torch.equal(
scheduler.timesteps , torch.LongTensor(
[901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1] ) , )
def UpperCamelCase__ ( self ):
"""simple docstring"""
        for beta_start, beta_end in zip([0.0001, 0.001] , [0.002, 0.02] ):
self.check_over_configs(beta_start=lowercase_ , beta_end=lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
for t in [1, 5, 10]:
self.check_over_forward(time_step=lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ):
self.check_over_forward(num_inference_steps=lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
# earlier version of set_timesteps() caused an error indexing alpha's with inference steps as power of 3
UpperCAmelCase_ : List[Any] = 27
for scheduler_class in self.scheduler_classes:
UpperCAmelCase_ : List[Any] = self.dummy_sample
UpperCAmelCase_ : Optional[int] = 0.1 * sample
UpperCAmelCase_ : List[str] = self.get_scheduler_config()
UpperCAmelCase_ : List[str] = scheduler_class(**lowercase_ )
scheduler.set_timesteps(lowercase_ )
# before power of 3 fix, would error on first step, so we only need to do two
for i, t in enumerate(scheduler.prk_timesteps[:2] ):
UpperCAmelCase_ : List[str] = scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ ).prev_sample
def UpperCamelCase__ ( self ):
"""simple docstring"""
with self.assertRaises(lowercase_ ):
UpperCAmelCase_ : List[str] = self.scheduler_classes[0]
UpperCAmelCase_ : str = self.get_scheduler_config()
UpperCAmelCase_ : Tuple = scheduler_class(**lowercase_ )
scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample ).prev_sample
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : List[Any] = self.full_loop()
UpperCAmelCase_ : Any = torch.sum(torch.abs(lowercase_ ) )
UpperCAmelCase_ : Dict = torch.mean(torch.abs(lowercase_ ) )
        assert abs(result_sum.item() - 198.1318 ) < 1E-2
        assert abs(result_mean.item() - 0.2580 ) < 1E-3
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = self.full_loop(prediction_type="v_prediction" )
UpperCAmelCase_ : str = torch.sum(torch.abs(lowercase_ ) )
UpperCAmelCase_ : Tuple = torch.mean(torch.abs(lowercase_ ) )
        assert abs(result_sum.item() - 67.3986 ) < 1E-2
        assert abs(result_mean.item() - 0.0878 ) < 1E-3
def UpperCamelCase__ ( self ):
"""simple docstring"""
# We specify different beta, so that the first alpha is 0.99
UpperCAmelCase_ : Union[str, Any] = self.full_loop(set_alpha_to_one=lowercase_ , beta_start=0.01 )
UpperCAmelCase_ : List[Any] = torch.sum(torch.abs(lowercase_ ) )
UpperCAmelCase_ : int = torch.mean(torch.abs(lowercase_ ) )
        assert abs(result_sum.item() - 230.0399 ) < 1E-2
        assert abs(result_mean.item() - 0.2995 ) < 1E-3
def UpperCamelCase__ ( self ):
"""simple docstring"""
# We specify different beta, so that the first alpha is 0.99
UpperCAmelCase_ : Tuple = self.full_loop(set_alpha_to_one=lowercase_ , beta_start=0.01 )
UpperCAmelCase_ : int = torch.sum(torch.abs(lowercase_ ) )
UpperCAmelCase_ : Tuple = torch.mean(torch.abs(lowercase_ ) )
        assert abs(result_sum.item() - 186.9482 ) < 1E-2
        assert abs(result_mean.item() - 0.2434 ) < 1E-3
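# Minimal standalone sketch of driving the scheduler under test, with random
# tensors standing in for a real model; the call pattern mirrors the
# full-loop helper above and the function name is an assumption.
def _pndm_smoke_test():
    scheduler = PNDMScheduler(num_train_timesteps=1000, beta_schedule="linear")
    scheduler.set_timesteps(10)
    sample = torch.randn(1, 3, 8, 8)
    for t in scheduler.prk_timesteps:
        residual = torch.randn_like(sample)  # stand-in for the model output
        sample = scheduler.step_prk(residual, t, sample).prev_sample
    for t in scheduler.plms_timesteps:
        residual = torch.randn_like(sample)
        sample = scheduler.step_plms(residual, t, sample).prev_sample
    return sample.shape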
| 23
| 0
|
import enum
import shutil
import sys
TERMINAL_WIDTH , _ = shutil.get_terminal_size()
CURSOR_TO_CHAR = {'UP': 'A', 'DOWN': 'B', 'RIGHT': 'C', 'LEFT': 'D'}
class Direction(enum.Enum):
    '''simple docstring'''
    UP = 0
    DOWN = 1
def forceWrite(content, end="" ):
    sys.stdout.write(str(content ) + end )
    sys.stdout.flush()
def writeColor(content, color, end="" ):
    forceWrite(f"""\u001b[{color}m{content}\u001b[0m""", end )
def reset_cursor():
    forceWrite("\r" )
def move_cursor(num_lines, direction ):
    forceWrite(f"""\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}""" )
def clear_line():
    forceWrite(" " * TERMINAL_WIDTH )
    reset_cursor()
def linebreak():
    reset_cursor()
    forceWrite("-" * TERMINAL_WIDTH )
| 368
|
"""simple docstring"""
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
_unmatched = object()
# For specifying empty leaf dict `{}`
empty_dict = object()
def _match(qs, ks ):
    # compile regexes and force a complete match against each window of ks
    qts = tuple((re.compile(x + "$" ) for x in qs) )
    for i in range(len(ks ) - len(qs ) + 1 ):
        matches = [x.match(y ) for x, y in zip(qts, ks[i:] )]
        if matches and all(matches ):
            return True
    return False
def _replacement_rules(rules ):
    def replace(key, val ):
        for rule, replacement in rules:
            if _match(rule, key ):
                return replacement
        return val
    return replace
def _get_partition_rules():
    return [
        # embeddings
        (("transformer", "wpe", "embedding"), P("mp", None )),
        (("transformer", "wte", "embedding"), P("mp", None )),
        # attention
        (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(None, "mp" )),
        (("attention", "out_proj", "kernel"), P("mp", None )),
        (("attention", "out_proj", "bias"), None),
        # mlp
        (("mlp", "c_fc", "kernel"), P(None, "mp" )),
        (("mlp", "c_fc", "bias"), P("mp" )),
        (("mlp", "c_proj", "kernel"), P("mp", None )),
        (("mlp", "c_proj", "bias"), None),
        # layer norms
        ((r"ln_\d+", "bias"), None),
        ((r"\d+", r"ln_\d+", "scale"), None),
        (("ln_f", "bias"), None),
        (("ln_f", "scale"), None),
    ]
def set_partitions(in_dict ):
    rules = _get_partition_rules()
    replace = _replacement_rules(rules )
    initd = {k: _unmatched for k in flatten_dict(in_dict )}
    result = {k: replace(k, v ) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(result ) )
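# Tiny worked check of the suffix-window matching above: a rule matches when
# its regexes align with some contiguous window of the flattened key.
# >>> _match(("mlp", "c_fc", "kernel"), ("transformer", "h", "0", "mlp", "c_fc", "kernel"))
# True
# >>> _match(("mlp", "c_fc", "kernel"), ("transformer", "wte", "embedding"))
# False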
| 23
| 0
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_camembert import CamembertTokenizer
else:
_a = None
_a = logging.get_logger(__name__)
_a = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}
_a = {
'vocab_file': {
'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model',
},
'tokenizer_file': {
'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/tokenizer.json',
},
}
_a = {
'camembert-base': 512,
}
_a = '▁'
class A_ (lowercase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE__ : str = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE__ : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE__ : Union[str, Any] = ["""input_ids""", """attention_mask"""]
SCREAMING_SNAKE_CASE__ : str = CamembertTokenizer
def __init__( self , lowercase_=None , lowercase_=None , lowercase_="<s>" , lowercase_="</s>" , lowercase_="</s>" , lowercase_="<s>" , lowercase_="<unk>" , lowercase_="<pad>" , lowercase_="<mask>" , lowercase_=["<s>NOTUSED", "</s>NOTUSED"] , **lowercase_ , ):
"""simple docstring"""
UpperCAmelCase_ : str = AddedToken(lowercase_ , lstrip=lowercase_ , rstrip=lowercase_ ) if isinstance(lowercase_ , lowercase_ ) else mask_token
super().__init__(
lowercase_ , tokenizer_file=lowercase_ , bos_token=lowercase_ , eos_token=lowercase_ , sep_token=lowercase_ , cls_token=lowercase_ , unk_token=lowercase_ , pad_token=lowercase_ , mask_token=lowercase_ , additional_special_tokens=lowercase_ , **lowercase_ , )
UpperCAmelCase_ : Any = vocab_file
UpperCAmelCase_ : int = False if not self.vocab_file else True
def UpperCamelCase__ ( self , lowercase_ , lowercase_ = None ):
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
UpperCAmelCase_ : str = [self.cls_token_id]
UpperCAmelCase_ : List[Any] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def UpperCamelCase__ ( self , lowercase_ , lowercase_ = None ):
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = [self.sep_token_id]
UpperCAmelCase_ : List[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def UpperCamelCase__ ( self , lowercase_ , lowercase_ = None ):
"""simple docstring"""
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer." )
if not os.path.isdir(lowercase_ ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
UpperCAmelCase_ : Any = os.path.join(
lowercase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase_ ):
copyfile(self.vocab_file , lowercase_ )
return (out_vocab_file,)
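# Typical round-trip with the fast tokenizer above (network access assumed;
# CamembertTokenizerFast is the exported name of this class):
# tok = CamembertTokenizerFast.from_pretrained("camembert-base")
# ids = tok("J'aime le camembert !")["input_ids"]
# text = tok.decode(ids, skip_special_tokens=True)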
| 369
|
"""simple docstring"""
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
_a = logging.getLogger()
@unittest.skip("""Temporarily disable the doc tests.""" )
@require_torch
@require_tf
@slow
class A_ (unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase__ ( self , lowercase_ , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = True , ):
"""simple docstring"""
UpperCAmelCase_ : List[str] = [file for file in os.listdir(lowercase_ ) if os.path.isfile(os.path.join(lowercase_ , lowercase_ ) )]
if identifier is not None:
UpperCAmelCase_ : Dict = [file for file in files if identifier in file]
if n_identifier is not None:
if isinstance(lowercase_ , lowercase_ ):
for n_ in n_identifier:
UpperCAmelCase_ : str = [file for file in files if n_ not in file]
else:
UpperCAmelCase_ : Any = [file for file in files if n_identifier not in file]
UpperCAmelCase_ : Union[str, Any] = ignore_files or []
ignore_files.append("__init__.py" )
UpperCAmelCase_ : Optional[int] = [file for file in files if file not in ignore_files]
for file in files:
# Open all files
print("Testing" , lowercase_ )
if only_modules:
UpperCAmelCase_ : str = file.split("." )[0]
try:
UpperCAmelCase_ : str = getattr(lowercase_ , lowercase_ )
UpperCAmelCase_ : Tuple = doctest.DocTestSuite(lowercase_ )
UpperCAmelCase_ : int = unittest.TextTestRunner().run(lowercase_ )
self.assertIs(len(result.failures ) , 0 )
except AttributeError:
logger.info(F"""{module_identifier} is not a module.""" )
else:
UpperCAmelCase_ : Optional[Any] = doctest.testfile(str(".." / directory / file ) , optionflags=doctest.ELLIPSIS )
self.assertIs(result.failed , 0 )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : int = Path("src/transformers" )
UpperCAmelCase_ : str = "modeling"
UpperCAmelCase_ : Optional[Any] = [
"modeling_ctrl.py",
"modeling_tf_ctrl.py",
]
self.analyze_directory(lowercase_ , identifier=lowercase_ , ignore_files=lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : List[Any] = Path("src/transformers" )
UpperCAmelCase_ : Any = "tokenization"
self.analyze_directory(lowercase_ , identifier=lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = Path("src/transformers" )
UpperCAmelCase_ : List[Any] = "configuration"
self.analyze_directory(lowercase_ , identifier=lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = Path("src/transformers" )
UpperCAmelCase_ : List[Any] = ["configuration", "modeling", "tokenization"]
self.analyze_directory(lowercase_ , n_identifier=lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Dict = Path("docs/source" )
UpperCAmelCase_ : Union[str, Any] = ["favicon.ico"]
self.analyze_directory(lowercase_ , ignore_files=lowercase_ , only_modules=lowercase_ )
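# The harness above ultimately drives `doctest`; a minimal passing docstring
# example of the kind it collects looks like this (illustrative only):
def _doctest_example(a, b):
    """
    >>> _doctest_example(2, 3)
    5
    """
    return a + b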
| 23
| 0
|
"""simple docstring"""
import os
import sys
import unittest
_a = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
_a = os.path.join(git_repo_path, 'src', 'diffusers')
class A_ (unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : int = find_backend(" if not is_torch_available():" )
self.assertEqual(lowercase_ , "torch" )
# backend_with_underscore = find_backend(" if not is_tensorflow_text_available():")
# self.assertEqual(backend_with_underscore, "tensorflow_text")
UpperCAmelCase_ : List[str] = find_backend(" if not (is_torch_available() and is_transformers_available()):" )
self.assertEqual(lowercase_ , "torch_and_transformers" )
# double_backend_with_underscore = find_backend(
# " if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
# )
# self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")
UpperCAmelCase_ : Any = find_backend(
" if not (is_torch_available() and is_transformers_available() and is_onnx_available()):" )
self.assertEqual(lowercase_ , "torch_and_transformers_and_onnx" )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = read_init()
# We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
self.assertIn("torch" , lowercase_ )
self.assertIn("torch_and_transformers" , lowercase_ )
self.assertIn("flax_and_transformers" , lowercase_ )
self.assertIn("torch_and_transformers_and_onnx" , lowercase_ )
# Likewise, we can't assert on the exact content of a key
self.assertIn("UNet2DModel" , objects["torch"] )
self.assertIn("FlaxUNet2DConditionModel" , objects["flax"] )
self.assertIn("StableDiffusionPipeline" , objects["torch_and_transformers"] )
self.assertIn("FlaxStableDiffusionPipeline" , objects["flax_and_transformers"] )
self.assertIn("LMSDiscreteScheduler" , objects["torch_and_scipy"] )
self.assertIn("OnnxStableDiffusionPipeline" , objects["torch_and_transformers_and_onnx"] )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Dict = create_dummy_object("CONSTANT" , "'torch'" )
self.assertEqual(lowercase_ , "\nCONSTANT = None\n" )
UpperCAmelCase_ : Union[str, Any] = create_dummy_object("function" , "'torch'" )
self.assertEqual(
lowercase_ , "\ndef function(*args, **kwargs):\n requires_backends(function, 'torch')\n" )
UpperCAmelCase_ : List[Any] = "\nclass FakeClass(metaclass=DummyObject):\n _backends = 'torch'\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, 'torch')\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, 'torch')\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, 'torch')\n"
UpperCAmelCase_ : List[str] = create_dummy_object("FakeClass" , "'torch'" )
self.assertEqual(lowercase_ , lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : List[str] = "# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n requires_backends(function, [\"torch\"])\n\n\nclass FakeClass(metaclass=DummyObject):\n _backends = [\"torch\"]\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, [\"torch\"])\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, [\"torch\"])\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, [\"torch\"])\n"
UpperCAmelCase_ : int = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]} )
self.assertEqual(dummy_files["torch"] , lowercase_ )
| 370
|
"""simple docstring"""
import warnings
from ...utils import is_sklearn_available, requires_backends
if is_sklearn_available():
    from scipy.stats import pearsonr, spearmanr
    from sklearn.metrics import f1_score, matthews_corrcoef
DEPRECATION_WARNING = (
    'This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate '
    'library. You can have a look at this example script for pointers: '
    'https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py'
)
def simple_accuracy(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(simple_accuracy, "sklearn")
    return (preds == labels).mean()
def acc_and_f1(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(acc_and_f1, "sklearn")
    acc = simple_accuracy(preds, labels)
    f1 = f1_score(y_true=labels, y_pred=preds)
    return {
        "acc": acc,
        "f1": f1,
        "acc_and_f1": (acc + f1) / 2,
    }
def pearson_and_spearman(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(pearson_and_spearman, "sklearn")
    pearson_corr = pearsonr(preds, labels)[0]
    spearman_corr = spearmanr(preds, labels)[0]
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
        "corr": (pearson_corr + spearman_corr) / 2,
    }
def glue_compute_metrics(task_name, preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(glue_compute_metrics, "sklearn")
    assert len(preds) == len(labels), f"Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}"
    if task_name == "cola":
        return {"mcc": matthews_corrcoef(labels, preds)}
    elif task_name == "sst-2":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "mrpc":
        return acc_and_f1(preds, labels)
    elif task_name == "sts-b":
        return pearson_and_spearman(preds, labels)
    elif task_name == "qqp":
        return acc_and_f1(preds, labels)
    elif task_name == "mnli":
        return {"mnli/acc": simple_accuracy(preds, labels)}
    elif task_name == "mnli-mm":
        return {"mnli-mm/acc": simple_accuracy(preds, labels)}
    elif task_name == "qnli":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "rte":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "wnli":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "hans":
        return {"acc": simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)
def xnli_compute_metrics(task_name, preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(xnli_compute_metrics, "sklearn")
    if len(preds) != len(labels):
        raise ValueError(f"Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}")
    if task_name == "xnli":
        return {"acc": simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)
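# Hedged usage sketch for the deprecated helpers above, with made-up arrays:
#
#   import numpy as np
#   preds = np.array([1, 0, 1, 1])
#   labels = np.array([1, 0, 0, 1])
#   glue_compute_metrics("mrpc", preds, labels)
#   # -> {"acc": 0.75, "f1": 0.8, "acc_and_f1": 0.775}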
| 23
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_a = {'configuration_glpn': ['GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GLPNConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a = ['GLPNFeatureExtractor']
_a = ['GLPNImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a = [
'GLPN_PRETRAINED_MODEL_ARCHIVE_LIST',
'GLPNForDepthEstimation',
'GLPNLayer',
'GLPNModel',
'GLPNPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_glpn import GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP, GLPNConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_glpn import GLPNFeatureExtractor
from .image_processing_glpn import GLPNImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_glpn import (
GLPN_PRETRAINED_MODEL_ARCHIVE_LIST,
GLPNForDepthEstimation,
GLPNLayer,
GLPNModel,
GLPNPreTrainedModel,
)
else:
import sys
_a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
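# Note on the pattern above (illustrative): `_LazyModule` defers the heavy imports
# until first attribute access, so
#   from transformers.models import glpn   # cheap, nothing heavy imported yet
#   glpn.GLPNConfig                        # first access triggers the real import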
| 371
|
"""simple docstring"""
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_a = logging.get_logger(__name__)
_a = {'vocab_file': 'vocab.json'}
_a = {
'vocab_file': {
'mgp-str': 'https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json',
}
}
_a = {'mgp-str': 27}
class A_ (lowercase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE__ : List[str] = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE__ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__(self, vocab_file, unk_token="[GO]", bos_token="[GO]", eos_token="[s]", pad_token="[GO]", **kwargs):
        """simple docstring"""
        super().__init__(
            unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, **kwargs)
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.vocab = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.vocab.items()}
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return len(self.vocab )
def UpperCamelCase__ ( self ):
"""simple docstring"""
return dict(self.vocab , **self.added_tokens_encoder )
    def UpperCamelCase__ ( self , text ):
        """simple docstring"""
        char_tokens = []
        for s in text:
            char_tokens.extend(s)
        return char_tokens
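        # Illustrative: tokenization is strictly per character, so an input like
        # "abc" becomes ["a", "b", "c"] before the vocab lookup below.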
def UpperCamelCase__ ( self , lowercase_ ):
"""simple docstring"""
return self.vocab.get(lowercase_ , self.vocab.get(self.unk_token ) )
def UpperCamelCase__ ( self , lowercase_ ):
"""simple docstring"""
return self.decoder.get(lowercase_ )
    def UpperCamelCase__ ( self , save_directory , filename_prefix = None ):
        """simple docstring"""
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.vocab, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        return (vocab_file,)
| 23
| 0
|
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class A_ (metaclass=lowercase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict = ["""torch"""]
def __init__( self , *lowercase_ , **lowercase_ ):
"""simple docstring"""
requires_backends(self , ["torch"] )
@classmethod
def UpperCamelCase__ ( cls , *lowercase_ , **lowercase_ ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
@classmethod
def UpperCamelCase__ ( cls , *lowercase_ , **lowercase_ ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
class A_ (metaclass=lowercase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[int] = ["""torch"""]
def __init__( self , *lowercase_ , **lowercase_ ):
"""simple docstring"""
requires_backends(self , ["torch"] )
@classmethod
def UpperCamelCase__ ( cls , *lowercase_ , **lowercase_ ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
@classmethod
def UpperCamelCase__ ( cls , *lowercase_ , **lowercase_ ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
class A_ (metaclass=lowercase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = ["""torch"""]
def __init__( self , *lowercase_ , **lowercase_ ):
"""simple docstring"""
requires_backends(self , ["torch"] )
@classmethod
def UpperCamelCase__ ( cls , *lowercase_ , **lowercase_ ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
@classmethod
def UpperCamelCase__ ( cls , *lowercase_ , **lowercase_ ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
class A_ (metaclass=lowercase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Tuple = ["""torch"""]
def __init__( self , *lowercase_ , **lowercase_ ):
"""simple docstring"""
requires_backends(self , ["torch"] )
@classmethod
def UpperCamelCase__ ( cls , *lowercase_ , **lowercase_ ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
@classmethod
def UpperCamelCase__ ( cls , *lowercase_ , **lowercase_ ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
class A_ (metaclass=lowercase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] = ["""torch"""]
def __init__( self , *lowercase_ , **lowercase_ ):
"""simple docstring"""
requires_backends(self , ["torch"] )
@classmethod
def UpperCamelCase__ ( cls , *lowercase_ , **lowercase_ ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
@classmethod
def UpperCamelCase__ ( cls , *lowercase_ , **lowercase_ ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
class A_ (metaclass=lowercase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Any = ["""torch"""]
def __init__( self , *lowercase_ , **lowercase_ ):
"""simple docstring"""
requires_backends(self , ["torch"] )
@classmethod
def UpperCamelCase__ ( cls , *lowercase_ , **lowercase_ ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
@classmethod
def UpperCamelCase__ ( cls , *lowercase_ , **lowercase_ ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
class A_ (metaclass=lowercase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Any = ["""torch"""]
def __init__( self , *lowercase_ , **lowercase_ ):
"""simple docstring"""
requires_backends(self , ["torch"] )
@classmethod
def UpperCamelCase__ ( cls , *lowercase_ , **lowercase_ ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
@classmethod
def UpperCamelCase__ ( cls , *lowercase_ , **lowercase_ ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
class A_ (metaclass=lowercase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[str] = ["""torch"""]
def __init__( self , *lowercase_ , **lowercase_ ):
"""simple docstring"""
requires_backends(self , ["torch"] )
@classmethod
def UpperCamelCase__ ( cls , *lowercase_ , **lowercase_ ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
@classmethod
def UpperCamelCase__ ( cls , *lowercase_ , **lowercase_ ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
class A_ (metaclass=lowercase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[str] = ["""torch"""]
def __init__( self , *lowercase_ , **lowercase_ ):
"""simple docstring"""
requires_backends(self , ["torch"] )
@classmethod
def UpperCamelCase__ ( cls , *lowercase_ , **lowercase_ ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
@classmethod
def UpperCamelCase__ ( cls , *lowercase_ , **lowercase_ ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
class A_ (metaclass=lowercase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[int] = ["""torch"""]
def __init__( self , *lowercase_ , **lowercase_ ):
"""simple docstring"""
requires_backends(self , ["torch"] )
@classmethod
def UpperCamelCase__ ( cls , *lowercase_ , **lowercase_ ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
@classmethod
def UpperCamelCase__ ( cls , *lowercase_ , **lowercase_ ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
class A_ (metaclass=lowercase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Tuple = ["""torch"""]
def __init__( self , *lowercase_ , **lowercase_ ):
"""simple docstring"""
requires_backends(self , ["torch"] )
@classmethod
def UpperCamelCase__ ( cls , *lowercase_ , **lowercase_ ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
@classmethod
def UpperCamelCase__ ( cls , *lowercase_ , **lowercase_ ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
def __a ( *__lowerCamelCase, **__lowerCamelCase ):
requires_backends(__lowerCamelCase, ["torch"] )
def __a ( *__lowerCamelCase, **__lowerCamelCase ):
requires_backends(__lowerCamelCase, ["torch"] )
def __a ( *__lowerCamelCase, **__lowerCamelCase ):
requires_backends(__lowerCamelCase, ["torch"] )
def __a ( *__lowerCamelCase, **__lowerCamelCase ):
requires_backends(__lowerCamelCase, ["torch"] )
def __a ( *__lowerCamelCase, **__lowerCamelCase ):
requires_backends(__lowerCamelCase, ["torch"] )
def __a ( *__lowerCamelCase, **__lowerCamelCase ):
requires_backends(__lowerCamelCase, ["torch"] )
def __a ( *__lowerCamelCase, **__lowerCamelCase ):
requires_backends(__lowerCamelCase, ["torch"] )
class A_ (metaclass=lowercase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = ["""torch"""]
def __init__( self , *lowercase_ , **lowercase_ ):
"""simple docstring"""
requires_backends(self , ["torch"] )
@classmethod
def UpperCamelCase__ ( cls , *lowercase_ , **lowercase_ ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
@classmethod
def UpperCamelCase__ ( cls , *lowercase_ , **lowercase_ ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
class A_ (metaclass=lowercase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[str] = ["""torch"""]
def __init__( self , *lowercase_ , **lowercase_ ):
"""simple docstring"""
requires_backends(self , ["torch"] )
@classmethod
def UpperCamelCase__ ( cls , *lowercase_ , **lowercase_ ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
@classmethod
def UpperCamelCase__ ( cls , *lowercase_ , **lowercase_ ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
class A_ (metaclass=lowercase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Any = ["""torch"""]
def __init__( self , *lowercase_ , **lowercase_ ):
"""simple docstring"""
requires_backends(self , ["torch"] )
@classmethod
def UpperCamelCase__ ( cls , *lowercase_ , **lowercase_ ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
@classmethod
def UpperCamelCase__ ( cls , *lowercase_ , **lowercase_ ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
class A_ (metaclass=lowercase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = ["""torch"""]
def __init__( self , *lowercase_ , **lowercase_ ):
"""simple docstring"""
requires_backends(self , ["torch"] )
@classmethod
def UpperCamelCase__ ( cls , *lowercase_ , **lowercase_ ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
@classmethod
def UpperCamelCase__ ( cls , *lowercase_ , **lowercase_ ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
class A_ (metaclass=lowercase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Tuple = ["""torch"""]
def __init__( self , *lowercase_ , **lowercase_ ):
"""simple docstring"""
requires_backends(self , ["torch"] )
@classmethod
def UpperCamelCase__ ( cls , *lowercase_ , **lowercase_ ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
@classmethod
def UpperCamelCase__ ( cls , *lowercase_ , **lowercase_ ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
class A_ (metaclass=lowercase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[str] = ["""torch"""]
def __init__( self , *lowercase_ , **lowercase_ ):
"""simple docstring"""
requires_backends(self , ["torch"] )
@classmethod
def UpperCamelCase__ ( cls , *lowercase_ , **lowercase_ ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
@classmethod
def UpperCamelCase__ ( cls , *lowercase_ , **lowercase_ ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
class A_ (metaclass=lowercase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[int] = ["""torch"""]
def __init__( self , *lowercase_ , **lowercase_ ):
"""simple docstring"""
requires_backends(self , ["torch"] )
@classmethod
def UpperCamelCase__ ( cls , *lowercase_ , **lowercase_ ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
@classmethod
def UpperCamelCase__ ( cls , *lowercase_ , **lowercase_ ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
class A_ (metaclass=lowercase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[str] = ["""torch"""]
def __init__( self , *lowercase_ , **lowercase_ ):
"""simple docstring"""
requires_backends(self , ["torch"] )
@classmethod
def UpperCamelCase__ ( cls , *lowercase_ , **lowercase_ ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
@classmethod
def UpperCamelCase__ ( cls , *lowercase_ , **lowercase_ ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
class A_ (metaclass=lowercase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict = ["""torch"""]
def __init__( self , *lowercase_ , **lowercase_ ):
"""simple docstring"""
requires_backends(self , ["torch"] )
@classmethod
def UpperCamelCase__ ( cls , *lowercase_ , **lowercase_ ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
@classmethod
def UpperCamelCase__ ( cls , *lowercase_ , **lowercase_ ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
class A_ (metaclass=lowercase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = ["""torch"""]
def __init__( self , *lowercase_ , **lowercase_ ):
"""simple docstring"""
requires_backends(self , ["torch"] )
@classmethod
def UpperCamelCase__ ( cls , *lowercase_ , **lowercase_ ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
@classmethod
def UpperCamelCase__ ( cls , *lowercase_ , **lowercase_ ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
class A_ (metaclass=lowercase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[str] = ["""torch"""]
def __init__( self , *lowercase_ , **lowercase_ ):
"""simple docstring"""
requires_backends(self , ["torch"] )
@classmethod
def UpperCamelCase__ ( cls , *lowercase_ , **lowercase_ ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
@classmethod
def UpperCamelCase__ ( cls , *lowercase_ , **lowercase_ ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
class A_ (metaclass=lowercase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict = ["""torch"""]
def __init__( self , *lowercase_ , **lowercase_ ):
"""simple docstring"""
requires_backends(self , ["torch"] )
@classmethod
def UpperCamelCase__ ( cls , *lowercase_ , **lowercase_ ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
@classmethod
def UpperCamelCase__ ( cls , *lowercase_ , **lowercase_ ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
class A_ (metaclass=lowercase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = ["""torch"""]
def __init__( self , *lowercase_ , **lowercase_ ):
"""simple docstring"""
requires_backends(self , ["torch"] )
@classmethod
def UpperCamelCase__ ( cls , *lowercase_ , **lowercase_ ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
@classmethod
def UpperCamelCase__ ( cls , *lowercase_ , **lowercase_ ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
class A_ (metaclass=lowercase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = ["""torch"""]
def __init__( self , *lowercase_ , **lowercase_ ):
"""simple docstring"""
requires_backends(self , ["torch"] )
@classmethod
def UpperCamelCase__ ( cls , *lowercase_ , **lowercase_ ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
@classmethod
def UpperCamelCase__ ( cls , *lowercase_ , **lowercase_ ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
class A_ (metaclass=lowercase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Tuple = ["""torch"""]
def __init__( self , *lowercase_ , **lowercase_ ):
"""simple docstring"""
requires_backends(self , ["torch"] )
@classmethod
def UpperCamelCase__ ( cls , *lowercase_ , **lowercase_ ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
@classmethod
def UpperCamelCase__ ( cls , *lowercase_ , **lowercase_ ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
class A_ (metaclass=lowercase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = ["""torch"""]
def __init__( self , *lowercase_ , **lowercase_ ):
"""simple docstring"""
requires_backends(self , ["torch"] )
@classmethod
def UpperCamelCase__ ( cls , *lowercase_ , **lowercase_ ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
@classmethod
def UpperCamelCase__ ( cls , *lowercase_ , **lowercase_ ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
class A_ (metaclass=lowercase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Any = ["""torch"""]
def __init__( self , *lowercase_ , **lowercase_ ):
"""simple docstring"""
requires_backends(self , ["torch"] )
@classmethod
def UpperCamelCase__ ( cls , *lowercase_ , **lowercase_ ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
@classmethod
def UpperCamelCase__ ( cls , *lowercase_ , **lowercase_ ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
class A_ (metaclass=lowercase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[str] = ["""torch"""]
def __init__( self , *lowercase_ , **lowercase_ ):
"""simple docstring"""
requires_backends(self , ["torch"] )
@classmethod
def UpperCamelCase__ ( cls , *lowercase_ , **lowercase_ ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
@classmethod
def UpperCamelCase__ ( cls , *lowercase_ , **lowercase_ ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
class A_ (metaclass=lowercase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[int] = ["""torch"""]
def __init__( self , *lowercase_ , **lowercase_ ):
"""simple docstring"""
requires_backends(self , ["torch"] )
@classmethod
def UpperCamelCase__ ( cls , *lowercase_ , **lowercase_ ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
@classmethod
def UpperCamelCase__ ( cls , *lowercase_ , **lowercase_ ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
class A_ (metaclass=lowercase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[str] = ["""torch"""]
def __init__( self , *lowercase_ , **lowercase_ ):
"""simple docstring"""
requires_backends(self , ["torch"] )
@classmethod
def UpperCamelCase__ ( cls , *lowercase_ , **lowercase_ ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
@classmethod
def UpperCamelCase__ ( cls , *lowercase_ , **lowercase_ ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
class A_ (metaclass=lowercase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict = ["""torch"""]
def __init__( self , *lowercase_ , **lowercase_ ):
"""simple docstring"""
requires_backends(self , ["torch"] )
@classmethod
def UpperCamelCase__ ( cls , *lowercase_ , **lowercase_ ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
@classmethod
def UpperCamelCase__ ( cls , *lowercase_ , **lowercase_ ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
class A_ (metaclass=lowercase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict = ["""torch"""]
def __init__( self , *lowercase_ , **lowercase_ ):
"""simple docstring"""
requires_backends(self , ["torch"] )
@classmethod
def UpperCamelCase__ ( cls , *lowercase_ , **lowercase_ ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
@classmethod
def UpperCamelCase__ ( cls , *lowercase_ , **lowercase_ ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
class A_ (metaclass=lowercase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = ["""torch"""]
def __init__( self , *lowercase_ , **lowercase_ ):
"""simple docstring"""
requires_backends(self , ["torch"] )
@classmethod
def UpperCamelCase__ ( cls , *lowercase_ , **lowercase_ ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
@classmethod
def UpperCamelCase__ ( cls , *lowercase_ , **lowercase_ ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
class A_ (metaclass=lowercase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = ["""torch"""]
def __init__( self , *lowercase_ , **lowercase_ ):
"""simple docstring"""
requires_backends(self , ["torch"] )
@classmethod
def UpperCamelCase__ ( cls , *lowercase_ , **lowercase_ ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
@classmethod
def UpperCamelCase__ ( cls , *lowercase_ , **lowercase_ ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
class A_ (metaclass=lowercase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[Any] = ["""torch"""]
def __init__( self , *lowercase_ , **lowercase_ ):
"""simple docstring"""
requires_backends(self , ["torch"] )
@classmethod
def UpperCamelCase__ ( cls , *lowercase_ , **lowercase_ ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
@classmethod
def UpperCamelCase__ ( cls , *lowercase_ , **lowercase_ ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
class A_ (metaclass=lowercase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = ["""torch"""]
def __init__( self , *lowercase_ , **lowercase_ ):
"""simple docstring"""
requires_backends(self , ["torch"] )
@classmethod
def UpperCamelCase__ ( cls , *lowercase_ , **lowercase_ ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
@classmethod
def UpperCamelCase__ ( cls , *lowercase_ , **lowercase_ ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
class A_ (metaclass=lowercase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] = ["""torch"""]
def __init__( self , *lowercase_ , **lowercase_ ):
"""simple docstring"""
requires_backends(self , ["torch"] )
@classmethod
def UpperCamelCase__ ( cls , *lowercase_ , **lowercase_ ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
@classmethod
def UpperCamelCase__ ( cls , *lowercase_ , **lowercase_ ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
class A_ (metaclass=lowercase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] = ["""torch"""]
def __init__( self , *lowercase_ , **lowercase_ ):
"""simple docstring"""
requires_backends(self , ["torch"] )
@classmethod
def UpperCamelCase__ ( cls , *lowercase_ , **lowercase_ ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
@classmethod
def UpperCamelCase__ ( cls , *lowercase_ , **lowercase_ ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
class A_ (metaclass=lowercase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = ["""torch"""]
def __init__( self , *lowercase_ , **lowercase_ ):
"""simple docstring"""
requires_backends(self , ["torch"] )
@classmethod
def UpperCamelCase__ ( cls , *lowercase_ , **lowercase_ ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
@classmethod
def UpperCamelCase__ ( cls , *lowercase_ , **lowercase_ ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
class A_ (metaclass=lowercase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict = ["""torch"""]
def __init__( self , *lowercase_ , **lowercase_ ):
"""simple docstring"""
requires_backends(self , ["torch"] )
@classmethod
def UpperCamelCase__ ( cls , *lowercase_ , **lowercase_ ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
@classmethod
def UpperCamelCase__ ( cls , *lowercase_ , **lowercase_ ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
class A_ (metaclass=lowercase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] = ["""torch"""]
def __init__( self , *lowercase_ , **lowercase_ ):
"""simple docstring"""
requires_backends(self , ["torch"] )
@classmethod
def UpperCamelCase__ ( cls , *lowercase_ , **lowercase_ ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
@classmethod
def UpperCamelCase__ ( cls , *lowercase_ , **lowercase_ ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
class A_ (metaclass=lowercase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[int] = ["""torch"""]
def __init__( self , *lowercase_ , **lowercase_ ):
"""simple docstring"""
requires_backends(self , ["torch"] )
@classmethod
def UpperCamelCase__ ( cls , *lowercase_ , **lowercase_ ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
@classmethod
def UpperCamelCase__ ( cls , *lowercase_ , **lowercase_ ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
class A_ (metaclass=lowercase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Tuple = ["""torch"""]
def __init__( self , *lowercase_ , **lowercase_ ):
"""simple docstring"""
requires_backends(self , ["torch"] )
@classmethod
def UpperCamelCase__ ( cls , *lowercase_ , **lowercase_ ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
@classmethod
def UpperCamelCase__ ( cls , *lowercase_ , **lowercase_ ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
class A_ (metaclass=lowercase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Any = ["""torch"""]
def __init__( self , *lowercase_ , **lowercase_ ):
"""simple docstring"""
requires_backends(self , ["torch"] )
@classmethod
def UpperCamelCase__ ( cls , *lowercase_ , **lowercase_ ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
@classmethod
def UpperCamelCase__ ( cls , *lowercase_ , **lowercase_ ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
class A_ (metaclass=lowercase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = ["""torch"""]
def __init__( self , *lowercase_ , **lowercase_ ):
"""simple docstring"""
requires_backends(self , ["torch"] )
@classmethod
def UpperCamelCase__ ( cls , *lowercase_ , **lowercase_ ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
@classmethod
def UpperCamelCase__ ( cls , *lowercase_ , **lowercase_ ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
class A_ (metaclass=lowercase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = ["""torch"""]
def __init__( self , *lowercase_ , **lowercase_ ):
"""simple docstring"""
requires_backends(self , ["torch"] )
@classmethod
def UpperCamelCase__ ( cls , *lowercase_ , **lowercase_ ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
@classmethod
def UpperCamelCase__ ( cls , *lowercase_ , **lowercase_ ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
class A_ (metaclass=lowercase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] = ["""torch"""]
def __init__( self , *lowercase_ , **lowercase_ ):
"""simple docstring"""
requires_backends(self , ["torch"] )
@classmethod
def UpperCamelCase__ ( cls , *lowercase_ , **lowercase_ ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
@classmethod
def UpperCamelCase__ ( cls , *lowercase_ , **lowercase_ ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
class A_ (metaclass=lowercase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] = ["""torch"""]
def __init__( self , *lowercase_ , **lowercase_ ):
"""simple docstring"""
requires_backends(self , ["torch"] )
@classmethod
def UpperCamelCase__ ( cls , *lowercase_ , **lowercase_ ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
@classmethod
def UpperCamelCase__ ( cls , *lowercase_ , **lowercase_ ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
class A_ (metaclass=lowercase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[Any] = ["""torch"""]
def __init__( self , *lowercase_ , **lowercase_ ):
"""simple docstring"""
requires_backends(self , ["torch"] )
@classmethod
def UpperCamelCase__ ( cls , *lowercase_ , **lowercase_ ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
@classmethod
def UpperCamelCase__ ( cls , *lowercase_ , **lowercase_ ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
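# A minimal sketch of the boilerplate repeated above: `DummyObject` is a metaclass
# that routes any use of the class through `requires_backends`, which raises an
# informative ImportError when torch is missing. A hypothetical entry:
#
#   class SomeTorchOnlyPipeline(metaclass=DummyObject):  # hypothetical name
#       _backends = ["torch"]
#       def __init__(self, *args, **kwargs):
#           requires_backends(self, ["torch"])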
| 350
|
"""simple docstring"""
import string
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
_a = {
'E': 12.70,
'T': 9.06,
'A': 8.17,
'O': 7.51,
'I': 6.97,
'N': 6.75,
'S': 6.33,
'H': 6.09,
'R': 5.99,
'D': 4.25,
'L': 4.03,
'C': 2.78,
'U': 2.76,
'M': 2.41,
'W': 2.36,
'F': 2.23,
'G': 2.02,
'Y': 1.97,
'P': 1.93,
'B': 1.29,
'V': 0.98,
'K': 0.77,
'J': 0.15,
'X': 0.15,
'Q': 0.10,
'Z': 0.07,
}
_a = 'ETAOINSHRDLCUMWFGYPBVKJXQZ'
_a = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
def get_letter_count(message):
    letter_count = {letter: 0 for letter in string.ascii_uppercase}
    for letter in message.upper():
        if letter in LETTERS:
            letter_count[letter] += 1
    return letter_count
def get_item_at_index_zero(x):
    return x[0]
def get_frequency_order(message):
    letter_to_freq = get_letter_count(message)
    freq_to_letter: dict[int, list[str]] = {
        freq: [] for letter, freq in letter_to_freq.items()
    }
    for letter in LETTERS:
        freq_to_letter[letter_to_freq[letter]].append(letter)
    freq_to_letter_str: dict[int, str] = {}
    for freq in freq_to_letter:
        freq_to_letter[freq].sort(key=ETAOIN.find, reverse=True)
        freq_to_letter_str[freq] = "".join(freq_to_letter[freq])
    freq_pairs = list(freq_to_letter_str.items())
    freq_pairs.sort(key=get_item_at_index_zero, reverse=True)
    sorted_freq_to_letter_str: list[str] = [freq_pair[1] for freq_pair in freq_pairs]
    return "".join(sorted_freq_to_letter_str)
def english_freq_match_score(message):
    freq_order = get_frequency_order(message)
    match_score = 0
    for common_letter in ETAOIN[:6]:
        if common_letter in freq_order[:6]:
            match_score += 1
    for uncommon_letter in ETAOIN[-6:]:
        if uncommon_letter in freq_order[-6:]:
            match_score += 1
    return match_score
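# Hand-checked example (assuming the ETAOIN/LETTERS constants above): for
# "Hello World", get_letter_count yields {'L': 3, 'O': 2, 'H': 1, 'E': 1,
# 'W': 1, 'R': 1, 'D': 1, ...}, the frequency order starts "LOWDRH", and only
# 'O' of ETAOIN's six most common letters lands in that top six, so
# english_freq_match_score("Hello World") == 1.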
if __name__ == "__main__":
import doctest
doctest.testmod()
| 23
| 0
|
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import PNDMPipeline, PNDMScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class A_ (unittest.TestCase ):
'''simple docstring'''
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
UpperCAmelCase_ : List[str] = UNet2DModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("DownBlock2D", "AttnDownBlock2D") , up_block_types=("AttnUpBlock2D", "UpBlock2D") , )
return model
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = self.dummy_uncond_unet
UpperCAmelCase_ : Union[str, Any] = PNDMScheduler()
UpperCAmelCase_ : Any = PNDMPipeline(unet=lowercase_ , scheduler=lowercase_ )
pndm.to(lowercase_ )
pndm.set_progress_bar_config(disable=lowercase_ )
UpperCAmelCase_ : Dict = torch.manual_seed(0 )
UpperCAmelCase_ : List[Any] = pndm(generator=lowercase_ , num_inference_steps=20 , output_type="numpy" ).images
UpperCAmelCase_ : Tuple = torch.manual_seed(0 )
UpperCAmelCase_ : Union[str, Any] = pndm(generator=lowercase_ , num_inference_steps=20 , output_type="numpy" , return_dict=lowercase_ )[0]
UpperCAmelCase_ : Optional[int] = image[0, -3:, -3:, -1]
UpperCAmelCase_ : Union[str, Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
UpperCAmelCase_ : Tuple = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
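# The assertions above compare a 3x3 corner slice of the generated image against
# hard-coded reference values under enable_full_determinism(); the @slow test
# below repeats the same check with real pretrained weights.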
@slow
@require_torch
class A_ (unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = "google/ddpm-cifar10-32"
UpperCAmelCase_ : Optional[Any] = UNet2DModel.from_pretrained(lowercase_ )
UpperCAmelCase_ : List[str] = PNDMScheduler()
UpperCAmelCase_ : Optional[int] = PNDMPipeline(unet=lowercase_ , scheduler=lowercase_ )
pndm.to(lowercase_ )
pndm.set_progress_bar_config(disable=lowercase_ )
UpperCAmelCase_ : Optional[Any] = torch.manual_seed(0 )
UpperCAmelCase_ : Optional[int] = pndm(generator=lowercase_ , output_type="numpy" ).images
UpperCAmelCase_ : str = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
UpperCAmelCase_ : Union[str, Any] = np.array([0.1564, 0.14645, 0.1406, 0.14715, 0.12425, 0.14045, 0.13115, 0.12175, 0.125] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 351
|
"""simple docstring"""
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
_a = logging.getLogger()
def __a ( ):
UpperCAmelCase_ : Tuple = argparse.ArgumentParser()
parser.add_argument("-f" )
UpperCAmelCase_ : Dict = parser.parse_args()
return args.f
class A_ (lowercase__ ):
'''simple docstring'''
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Tuple = logging.StreamHandler(sys.stdout )
logger.addHandler(lowercase_ )
def UpperCamelCase__ ( self , lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = get_gpu_count()
if n_gpu > 1:
pass
# XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
# script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
# distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
# cmd = [sys.executable] + distributed_args + args
# execute_subprocess_async(cmd, env=self.get_env())
# XXX: test the results - need to save them first into .json file
else:
args.insert(0 , "run_glue_deebert.py" )
with patch.object(lowercase_ , "argv" , lowercase_ ):
UpperCAmelCase_ : List[str] = run_glue_deebert.main()
            for value in result.values():
                self.assertGreaterEqual(value, 0.666)
@slow
@require_torch_non_multi_gpu
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = "\n --model_type roberta\n --model_name_or_path roberta-base\n --task_name MRPC\n --do_train\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --max_seq_length 128\n --per_gpu_eval_batch_size=1\n --per_gpu_train_batch_size=8\n --learning_rate 2e-4\n --num_train_epochs 3\n --overwrite_output_dir\n --seed 42\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --save_steps 0\n --overwrite_cache\n --eval_after_first_stage\n ".split()
self.run_and_check(lowercase_ )
UpperCAmelCase_ : Optional[Any] = "\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --eval_each_highway\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n ".split()
self.run_and_check(lowercase_ )
UpperCAmelCase_ : Dict = "\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --early_exit_entropy 0.1\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n ".split()
self.run_and_check(lowercase_ )
| 23
| 0
|
"""simple docstring"""
from manim import *
class A_ (lowercase__ ):
'''simple docstring'''
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : List[str] = Rectangle(height=0.5 , width=0.5 )
UpperCAmelCase_ : List[Any] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
UpperCAmelCase_ : Tuple = [mem.copy() for i in range(6 )]
UpperCAmelCase_ : Optional[int] = [mem.copy() for i in range(6 )]
UpperCAmelCase_ : List[Any] = VGroup(*lowercase_ ).arrange(lowercase_ , buff=0 )
UpperCAmelCase_ : Optional[Any] = VGroup(*lowercase_ ).arrange(lowercase_ , buff=0 )
UpperCAmelCase_ : List[str] = VGroup(lowercase_ , lowercase_ ).arrange(lowercase_ , buff=0 )
UpperCAmelCase_ : Optional[int] = Text("CPU" , font_size=24 )
UpperCAmelCase_ : str = Group(lowercase_ , lowercase_ ).arrange(lowercase_ , buff=0.5 , aligned_edge=lowercase_ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(lowercase_ )
UpperCAmelCase_ : Optional[int] = [mem.copy() for i in range(4 )]
UpperCAmelCase_ : Tuple = VGroup(*lowercase_ ).arrange(lowercase_ , buff=0 )
UpperCAmelCase_ : Any = Text("GPU" , font_size=24 )
UpperCAmelCase_ : Dict = Group(lowercase_ , lowercase_ ).arrange(lowercase_ , buff=0.5 , aligned_edge=lowercase_ )
gpu.move_to([-1, -1, 0] )
self.add(lowercase_ )
UpperCAmelCase_ : int = [mem.copy() for i in range(6 )]
UpperCAmelCase_ : Tuple = VGroup(*lowercase_ ).arrange(lowercase_ , buff=0 )
UpperCAmelCase_ : Optional[int] = Text("Model" , font_size=24 )
UpperCAmelCase_ : Optional[int] = Group(lowercase_ , lowercase_ ).arrange(lowercase_ , buff=0.5 , aligned_edge=lowercase_ )
model.move_to([3, -1.0, 0] )
self.add(lowercase_ )
UpperCAmelCase_ : Dict = []
for i, rect in enumerate(lowercase_ ):
rect.set_stroke(lowercase_ )
# target = fill.copy().set_fill(YELLOW, opacity=0.7)
# target.move_to(rect)
# self.add(target)
UpperCAmelCase_ : Optional[int] = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(lowercase_ , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=lowercase_ )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(cpu_targs[0] , direction=lowercase_ , buff=0.0 )
else:
cpu_target.next_to(cpu_targs[i - 1] , direction=lowercase_ , buff=0.0 )
self.add(lowercase_ )
cpu_targs.append(lowercase_ )
UpperCAmelCase_ : Dict = [mem.copy() for i in range(6 )]
UpperCAmelCase_ : Union[str, Any] = VGroup(*lowercase_ ).arrange(lowercase_ , buff=0 )
UpperCAmelCase_ : Tuple = Text("Loaded Checkpoint" , font_size=24 )
UpperCAmelCase_ : Union[str, Any] = Group(lowercase_ , lowercase_ ).arrange(lowercase_ , aligned_edge=lowercase_ , buff=0.4 )
checkpoint.move_to([3, 0.5, 0] )
UpperCAmelCase_ : int = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
UpperCAmelCase_ : Dict = MarkupText(
F"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(lowercase_ , lowercase_ )
UpperCAmelCase_ : List[Any] = MarkupText(
F"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=18 , )
blue_text.next_to(lowercase_ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
UpperCAmelCase_ : Union[str, Any] = MarkupText(
F"""Next, a <i><span fgcolor=\"{BLUE}\">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor=\"{BLUE}\">single shard</span>.""" , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(lowercase_ ) , Write(lowercase_ ) )
self.play(Write(lowercase_ , run_time=1 ) , Create(lowercase_ , run_time=1 ) )
UpperCAmelCase_ : Tuple = []
UpperCAmelCase_ : str = []
for i, rect in enumerate(lowercase_ ):
UpperCAmelCase_ : int = fill.copy().set_fill(lowercase_ , opacity=0.7 )
target.move_to(lowercase_ )
first_animations.append(GrowFromCenter(lowercase_ , run_time=1 ) )
UpperCAmelCase_ : str = target.copy()
cpu_target.generate_target()
if i < 5:
cpu_target.target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.target.move_to(cpu_right_col_base[i - 5] )
second_animations.append(MoveToTarget(lowercase_ , run_time=1.5 ) )
self.play(*lowercase_ )
self.play(*lowercase_ )
self.wait()
| 352
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_a = {'configuration_unispeech': ['UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP', 'UniSpeechConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a = [
'UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST',
'UniSpeechForCTC',
'UniSpeechForPreTraining',
'UniSpeechForSequenceClassification',
'UniSpeechModel',
'UniSpeechPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
_a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 23
| 0
|
"""simple docstring"""
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class A_ (unittest.TestCase ):
'''simple docstring'''
def __init__( self , lowercase_ , lowercase_=13 , lowercase_=7 , lowercase_=True , lowercase_=True , lowercase_=True , lowercase_=True , lowercase_=99 , lowercase_=32 , lowercase_=5 , lowercase_=4 , lowercase_=37 , lowercase_="gelu" , lowercase_=0.1 , lowercase_=0.1 , lowercase_=512 , lowercase_=16 , lowercase_=2 , lowercase_=0.02 , lowercase_=4 , ):
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = parent
UpperCAmelCase_ : Tuple = batch_size
UpperCAmelCase_ : Any = seq_length
UpperCAmelCase_ : Union[str, Any] = is_training
UpperCAmelCase_ : str = use_attention_mask
UpperCAmelCase_ : List[Any] = use_token_type_ids
UpperCAmelCase_ : List[Any] = use_labels
UpperCAmelCase_ : Dict = vocab_size
UpperCAmelCase_ : int = hidden_size
UpperCAmelCase_ : List[Any] = num_hidden_layers
UpperCAmelCase_ : Tuple = num_attention_heads
UpperCAmelCase_ : List[str] = intermediate_size
UpperCAmelCase_ : str = hidden_act
UpperCAmelCase_ : int = hidden_dropout_prob
UpperCAmelCase_ : str = attention_probs_dropout_prob
UpperCAmelCase_ : Union[str, Any] = max_position_embeddings
UpperCAmelCase_ : Optional[int] = type_vocab_size
UpperCAmelCase_ : List[Any] = type_sequence_label_size
UpperCAmelCase_ : str = initializer_range
UpperCAmelCase_ : str = num_choices
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase_ : List[str] = None
if self.use_attention_mask:
UpperCAmelCase_ : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase_ : List[str] = None
if self.use_token_type_ids:
UpperCAmelCase_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCAmelCase_ : List[Any] = RobertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowercase_ , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = self.prepare_config_and_inputs()
UpperCAmelCase_ : List[str] = config_and_inputs
UpperCAmelCase_ : List[Any] = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
return config, inputs_dict
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = self.prepare_config_and_inputs()
UpperCAmelCase_ : str = config_and_inputs
UpperCAmelCase_ : int = True
UpperCAmelCase_ : Tuple = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
UpperCAmelCase_ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class A_ (lowercase__ ,unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[str] = True
SCREAMING_SNAKE_CASE__ : Dict = (
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : str = FlaxRobertaModelTester(self )
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
for model_class_name in self.all_model_classes:
UpperCAmelCase_ : str = model_class_name.from_pretrained("roberta-base" , from_pt=lowercase_ )
UpperCAmelCase_ : Union[str, Any] = model(np.ones((1, 1) ) )
self.assertIsNotNone(lowercase_ )
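# Note: `from_pt=True` converts the PyTorch `roberta-base` checkpoint to Flax on
# the fly; this smoke test only asserts that a forward pass on a dummy (1, 1)
# input returns a non-None output.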
| 353
|
"""simple docstring"""
from typing import List, Optional, Union
import torch
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
_a = logging.get_logger(__name__) # pylint: disable=invalid-name
_a = '\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior")\n >>> pipe_prior.to("cuda")\n >>> prompt = "red cat, 4k photo"\n >>> out = pipe_prior(prompt)\n >>> image_emb = out.image_embeds\n >>> zero_image_emb = out.negative_image_embeds\n >>> pipe = KandinskyV22Pipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder")\n >>> pipe.to("cuda")\n >>> image = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=50,\n ... ).images\n >>> image[0].save("cat.png")\n ```\n'
def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase=8 ):
UpperCAmelCase_ : List[str] = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
UpperCAmelCase_ : Tuple = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
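# Worked example with the default scale_factor=8: 768 // 8**2 == 12 with no
# remainder, so downscale_height_and_width(768, 768) returns (96, 96), the
# latent spatial size later handed to prepare_latents in __call__.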
class A_ (lowercase__ ):
'''simple docstring'''
def __init__( self , lowercase_ , lowercase_ , lowercase_ , ):
"""simple docstring"""
super().__init__()
self.register_modules(
unet=lowercase_ , scheduler=lowercase_ , movq=lowercase_ , )
UpperCAmelCase_ : int = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ):
"""simple docstring"""
if latents is None:
UpperCAmelCase_ : Dict = randn_tensor(lowercase_ , generator=lowercase_ , device=lowercase_ , dtype=lowercase_ )
else:
if latents.shape != shape:
raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {shape}""" )
UpperCAmelCase_ : str = latents.to(lowercase_ )
UpperCAmelCase_ : Dict = latents * scheduler.init_noise_sigma
return latents
def UpperCamelCase__ ( self , lowercase_=0 ):
"""simple docstring"""
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
UpperCAmelCase_ : Any = torch.device(F"""cuda:{gpu_id}""" )
UpperCAmelCase_ : int = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(lowercase_ , lowercase_ )
def UpperCamelCase__ ( self , lowercase_=0 ):
"""simple docstring"""
if is_accelerate_available() and is_accelerate_version(">=" , "0.17.0.dev0" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher." )
UpperCAmelCase_ : Any = torch.device(F"""cuda:{gpu_id}""" )
if self.device.type != "cpu":
self.to("cpu" , silence_dtype_warnings=lowercase_ )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
UpperCAmelCase_ : List[Any] = None
for cpu_offloaded_model in [self.unet, self.movq]:
UpperCAmelCase_ , UpperCAmelCase_ : str = cpu_offload_with_hook(lowercase_ , lowercase_ , prev_module_hook=lowercase_ )
# We'll offload the last model manually.
UpperCAmelCase_ : Tuple = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def UpperCamelCase__ ( self ):
"""simple docstring"""
if not hasattr(self.unet , "_hf_hook" ):
return self.device
for module in self.unet.modules():
if (
hasattr(lowercase_ , "_hf_hook" )
and hasattr(module._hf_hook , "execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(lowercase_ )
def __call__( self , lowercase_ , lowercase_ , lowercase_ = 512 , lowercase_ = 512 , lowercase_ = 100 , lowercase_ = 4.0 , lowercase_ = 1 , lowercase_ = None , lowercase_ = None , lowercase_ = "pil" , lowercase_ = True , ):
"""simple docstring"""
UpperCAmelCase_ : str = self._execution_device
UpperCAmelCase_ : List[Any] = guidance_scale > 1.0
if isinstance(lowercase_ , lowercase_ ):
UpperCAmelCase_ : int = torch.cat(lowercase_ , dim=0 )
UpperCAmelCase_ : Any = image_embeds.shape[0] * num_images_per_prompt
if isinstance(lowercase_ , lowercase_ ):
UpperCAmelCase_ : List[Any] = torch.cat(lowercase_ , dim=0 )
if do_classifier_free_guidance:
UpperCAmelCase_ : Tuple = image_embeds.repeat_interleave(lowercase_ , dim=0 )
UpperCAmelCase_ : List[str] = negative_image_embeds.repeat_interleave(lowercase_ , dim=0 )
UpperCAmelCase_ : Optional[Any] = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=lowercase_ )
self.scheduler.set_timesteps(lowercase_ , device=lowercase_ )
UpperCAmelCase_ : List[Any] = self.scheduler.timesteps
UpperCAmelCase_ : List[str] = self.unet.config.in_channels
UpperCAmelCase_ , UpperCAmelCase_ : Dict = downscale_height_and_width(lowercase_ , lowercase_ , self.movq_scale_factor )
# create initial latent
UpperCAmelCase_ : int = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , image_embeds.dtype , lowercase_ , lowercase_ , lowercase_ , self.scheduler , )
for i, t in enumerate(self.progress_bar(lowercase_ ) ):
# expand the latents if we are doing classifier free guidance
UpperCAmelCase_ : Tuple = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
UpperCAmelCase_ : Union[str, Any] = {"image_embeds": image_embeds}
UpperCAmelCase_ : Optional[Any] = self.unet(
sample=lowercase_ , timestep=lowercase_ , encoder_hidden_states=lowercase_ , added_cond_kwargs=lowercase_ , return_dict=lowercase_ , )[0]
if do_classifier_free_guidance:
UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = noise_pred.split(latents.shape[1] , dim=1 )
UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = noise_pred.chunk(2 )
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = variance_pred.chunk(2 )
UpperCAmelCase_ : int = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
UpperCAmelCase_ : str = torch.cat([noise_pred, variance_pred_text] , dim=1 )
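# Classifier-free guidance combines the two branches as
# noise_uncond + guidance_scale * (noise_text - noise_uncond), carrying the
# learned-variance channels over from the text-conditioned branch.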
if not (
hasattr(self.scheduler.config , "variance_type" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
UpperCAmelCase_ , UpperCAmelCase_ : Dict = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
UpperCAmelCase_ : List[str] = self.scheduler.step(
lowercase_ , lowercase_ , lowercase_ , generator=lowercase_ , )[0]
# post-processing
UpperCAmelCase_ : Tuple = self.movq.decode(lowercase_ , force_not_quantize=lowercase_ )["sample"]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" )
if output_type in ["np", "pil"]:
UpperCAmelCase_ : List[Any] = image * 0.5 + 0.5
UpperCAmelCase_ : int = image.clamp(0 , 1 )
UpperCAmelCase_ : Tuple = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
UpperCAmelCase_ : Dict = self.numpy_to_pil(lowercase_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=lowercase_ )
| 23
| 0
|
"""simple docstring"""
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class A_ (unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = JukeboxTokenizer
SCREAMING_SNAKE_CASE__ : List[Any] = {
"""artist""": """Zac Brown Band""",
"""genres""": """Country""",
"""lyrics""": """I met a traveller from an antique land,
Who said \"Two vast and trunkless legs of stone
Stand in the desert. . . . Near them, on the sand,
Half sunk a shattered visage lies, whose frown,
And wrinkled lip, and sneer of cold command,
Tell that its sculptor well those passions read
Which yet survive, stamped on these lifeless things,
The hand that mocked them, and the heart that fed;
And on the pedestal, these words appear:
My name is Ozymandias, King of Kings;
Look on my Works, ye Mighty, and despair!
Nothing beside remains. Round the decay
Of that colossal Wreck, boundless and bare
The lone and level sands stretch far away
""",
}
@require_torch
def UpperCamelCase__ ( self ):
"""simple docstring"""
import torch
UpperCAmelCase_ : Any = JukeboxTokenizer.from_pretrained("openai/jukebox-1b-lyrics" )
UpperCAmelCase_ : str = tokenizer(**self.metas )["input_ids"]
# fmt: off
UpperCAmelCase_ : Any = [
torch.tensor([[
0, 0, 0, 7169, 507, 9, 76, 39, 31, 46, 76, 27,
76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32,
44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43,
47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76,
76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35,
30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76,
27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45,
45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46,
41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31,
76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63,
76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39,
64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40,
30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8,
27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45,
34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45,
27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34,
41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76,
76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49,
44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64,
76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41,
32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27,
40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46,
45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49,
31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27,
45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78,
76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29,
34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48,
31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41,
40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31,
38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64,
78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31,
76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39,
41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76,
27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44,
46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78,
76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76,
41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45,
46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49,
41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65,
78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76,
40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39,
27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33,
76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76,
76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76,
41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64,
76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76,
27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67,
78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46,
34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76,
44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47,
40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51,
78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76,
46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27,
38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47,
40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28,
27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30,
76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45,
76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44,
76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76,
76, 76]] ),
torch.tensor([[0, 0, 0, 1069, 11]] ),
torch.tensor([[0, 0, 0, 1069, 11]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
@require_torch
def UpperCamelCase__ ( self ):
"""simple docstring"""
import torch
UpperCAmelCase_ : Dict = JukeboxTokenizer.from_pretrained("openai/jukebox-5b-lyrics" )
UpperCAmelCase_ : Any = tokenizer(**self.metas )["input_ids"]
# fmt: off
UpperCAmelCase_ : Tuple = [
torch.tensor([[
0, 0, 0, 1069, 11, -1, -1, -1, -1, 9, 77, 39,
31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38,
31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27,
40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64,
79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41,
77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48,
27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40,
37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41,
32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40,
77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63,
77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77,
46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31,
77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77,
77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37,
77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30,
77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45,
64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49,
40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1,
40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77,
38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31,
31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29,
41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27,
46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46,
41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45,
31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44,
31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77,
23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47,
44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42,
31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77,
38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35,
40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77,
77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34,
27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34,
31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77,
34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32,
31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77,
1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42,
31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31,
45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42,
31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77,
77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77,
15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77,
11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33,
45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12,
41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41,
44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34,
46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42,
27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77,
77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45,
35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63,
77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30,
31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77,
77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38,
41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64,
77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27,
40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31,
77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45,
27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34,
77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77,
77, 77, 77, 77, 77, 77]] ),
torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ),
torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
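# A minimal usage sketch of the tokenizer exercised by the tests above; the
# checkpoint id and the (artist, genres, lyrics) keyword signature mirror the
# tests themselves, while the lyrics string here is an illustrative placeholder.
def _jukebox_tokenizer_demo():
    from transformers import JukeboxTokenizer

    tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-1b-lyrics")
    input_ids = tokenizer(artist="Zac Brown Band", genres="Country", lyrics="Nothing beside remains.")["input_ids"]
    # One token tensor per prior level: the full lyric context first, then two short ones.
    return [ids.shape for ids in input_ids]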
| 354
|
"""simple docstring"""
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
_a = logging.get_logger(__name__)
_a = {
'facebook/detr-resnet-50': 'https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json',
# See all DETR models at https://huggingface.co/models?filter=detr
}
class A_ (lowercase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict = """detr"""
SCREAMING_SNAKE_CASE__ : str = ["""past_key_values"""]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
}
def __init__( self , lowercase_=True , lowercase_=None , lowercase_=3 , lowercase_=100 , lowercase_=6 , lowercase_=2048 , lowercase_=8 , lowercase_=6 , lowercase_=2048 , lowercase_=8 , lowercase_=0.0 , lowercase_=0.0 , lowercase_=True , lowercase_="relu" , lowercase_=256 , lowercase_=0.1 , lowercase_=0.0 , lowercase_=0.0 , lowercase_=0.02 , lowercase_=1.0 , lowercase_=False , lowercase_="sine" , lowercase_="resnet50" , lowercase_=True , lowercase_=False , lowercase_=1 , lowercase_=5 , lowercase_=2 , lowercase_=1 , lowercase_=1 , lowercase_=5 , lowercase_=2 , lowercase_=0.1 , **lowercase_ , ):
"""simple docstring"""
if backbone_config is not None and use_timm_backbone:
raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`." )
if not use_timm_backbone:
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
UpperCAmelCase_ : Union[str, Any] = CONFIG_MAPPING["resnet"](out_features=["stage4"] )
elif isinstance(lowercase_ , lowercase_ ):
UpperCAmelCase_ : int = backbone_config.get("model_type" )
UpperCAmelCase_ : int = CONFIG_MAPPING[backbone_model_type]
UpperCAmelCase_ : Any = config_class.from_dict(lowercase_ )
# set timm attributes to None
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = None, None, None
UpperCAmelCase_ : int = use_timm_backbone
UpperCAmelCase_ : int = backbone_config
UpperCAmelCase_ : List[Any] = num_channels
UpperCAmelCase_ : int = num_queries
UpperCAmelCase_ : Union[str, Any] = d_model
UpperCAmelCase_ : str = encoder_ffn_dim
UpperCAmelCase_ : Tuple = encoder_layers
UpperCAmelCase_ : List[Any] = encoder_attention_heads
UpperCAmelCase_ : Union[str, Any] = decoder_ffn_dim
UpperCAmelCase_ : Optional[Any] = decoder_layers
UpperCAmelCase_ : Union[str, Any] = decoder_attention_heads
UpperCAmelCase_ : Optional[int] = dropout
UpperCAmelCase_ : List[str] = attention_dropout
UpperCAmelCase_ : Any = activation_dropout
UpperCAmelCase_ : str = activation_function
UpperCAmelCase_ : Tuple = init_std
UpperCAmelCase_ : Optional[Any] = init_xavier_std
UpperCAmelCase_ : Optional[Any] = encoder_layerdrop
UpperCAmelCase_ : Optional[int] = decoder_layerdrop
UpperCAmelCase_ : Tuple = encoder_layers
UpperCAmelCase_ : int = auxiliary_loss
UpperCAmelCase_ : Optional[Any] = position_embedding_type
UpperCAmelCase_ : Tuple = backbone
UpperCAmelCase_ : Optional[int] = use_pretrained_backbone
UpperCAmelCase_ : Dict = dilation
# Hungarian matcher
UpperCAmelCase_ : Union[str, Any] = class_cost
UpperCAmelCase_ : Any = bbox_cost
UpperCAmelCase_ : int = giou_cost
# Loss coefficients
UpperCAmelCase_ : str = mask_loss_coefficient
UpperCAmelCase_ : Any = dice_loss_coefficient
UpperCAmelCase_ : Optional[Any] = bbox_loss_coefficient
UpperCAmelCase_ : List[str] = giou_loss_coefficient
UpperCAmelCase_ : List[Any] = eos_coefficient
super().__init__(is_encoder_decoder=lowercase_ , **lowercase_ )
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return self.encoder_attention_heads
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return self.d_model
@classmethod
def UpperCamelCase__ ( cls , lowercase_ , **lowercase_ ):
"""simple docstring"""
return cls(backbone_config=lowercase_ , **lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : List[Any] = copy.deepcopy(self.__dict__ )
if output["backbone_config"] is not None:
UpperCAmelCase_ : Union[str, Any] = self.backbone_config.to_dict()
UpperCAmelCase_ : str = self.__class__.model_type
return output
class A_ (lowercase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = version.parse("""1.11""" )
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
("pixel_mask", {0: "batch"}),
] )
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return 1E-5
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return 12
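# A small sketch of how the attribute map above resolves aliases, written against
# the upstream `transformers.DetrConfig` that this class mirrors (an assumption
# based on the `"detr"` model type):
def _detr_config_demo():
    from transformers import DetrConfig

    config = DetrConfig(d_model=128, encoder_attention_heads=4)
    # `hidden_size` and `num_attention_heads` are resolved through attribute_map.
    assert config.hidden_size == config.d_model == 128
    assert config.num_attention_heads == config.encoder_attention_heads == 4
    return config.to_dict()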
| 23
| 0
|
"""simple docstring"""
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def __a ( tmpdir ):
UpperCAmelCase_ : Optional[int] = FileLock(str(tmpdir / "foo.lock" ) )
UpperCAmelCase_ : Dict = FileLock(str(tmpdir / "foo.lock" ) )
UpperCAmelCase_ : str = 0.01
with locka.acquire():
        with pytest.raises(Timeout ):
UpperCAmelCase_ : str = time.time()
            locka.acquire(timeout )
assert time.time() - _start > timeout
def __a ( tmpdir ):
UpperCAmelCase_ : Optional[Any] = "a" * 1000 + ".lock"
UpperCAmelCase_ : Optional[Any] = FileLock(str(tmpdir / filename ) )
assert locka._lock_file.endswith(".lock" )
    assert not locka._lock_file.endswith(filename )
assert len(os.path.basename(locka._lock_file ) ) <= 255
UpperCAmelCase_ : Tuple = FileLock(tmpdir / filename )
with locka.acquire():
        with pytest.raises(Timeout ):
locka.acquire(0 )
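# A self-contained sketch of the lock semantics the two tests above rely on: a
# second FileLock on the same path blocks, and `acquire(timeout)` raises
# `Timeout` once the deadline passes. `tmp_path` is the standard pytest fixture;
# the lock filename is illustrative.
def _filelock_demo(tmp_path):
    outer = FileLock(str(tmp_path / "demo.lock"))
    inner = FileLock(str(tmp_path / "demo.lock"))
    with outer.acquire():
        try:
            inner.acquire(timeout=0.01)
        except Timeout:
            return True  # expected: the path is already locked
    return False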
| 355
|
"""simple docstring"""
_a = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(100_000)]
def __a ( number ):
UpperCAmelCase_ : Optional[int] = 0
while number:
        # Increase speed slightly by checking every 5 digits together.
sum_of_digits_squared += DIGITS_SQUARED[number % 10_0000]
number //= 10_0000
return sum_of_digits_squared
# Every chain eventually ends in either 89 or 1.
# For the chain ending in 89, declaring the member 58 first minimizes the
# number of iterations needed when the remaining members are checked.
# The other chain ends in 1 and contains only the single element 1.
# So 58 and 1 are chosen to be declared at the start.
# A dictionary was changed to an array to speed up the solution.
_a = [None] * 10_000_000
_a = True
_a = False
def __a ( number ):
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore
    UpperCAmelCase_ : Dict = chain(next_number(number ) )
UpperCAmelCase_ : List[str] = number_chain
while number < 1000_0000:
UpperCAmelCase_ : List[Any] = number_chain
number *= 10
return number_chain
def __a ( number = 1000_0000 ):
    for i in range(1, number ):
        if CHAINS[i] is None:
            chain(i + 1 )
    return CHAINS[:number].count(False )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f"""{solution() = }""")
| 23
| 0
|
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
_a = logging.get_logger(__name__)
class A_ (lowercase__ ):
'''simple docstring'''
def __init__( self , lowercase_ , lowercase_ , lowercase_ , **lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = feature_size
UpperCAmelCase_ : Any = sampling_rate
UpperCAmelCase_ : Any = padding_value
UpperCAmelCase_ : str = kwargs.pop("padding_side" , "right" )
UpperCAmelCase_ : List[str] = kwargs.pop("return_attention_mask" , lowercase_ )
super().__init__(**lowercase_ )
def UpperCamelCase__ ( self , lowercase_ , lowercase_ = True , lowercase_ = None , lowercase_ = False , lowercase_ = None , lowercase_ = None , lowercase_ = None , ):
"""simple docstring"""
        # If we have a list of dicts, let's convert it into a dict of lists
        # We do this to allow using this method as a collate_fn function in a PyTorch DataLoader
if isinstance(lowercase_ , (list, tuple) ) and isinstance(processed_features[0] , (dict, BatchFeature) ):
UpperCAmelCase_ : Dict = {
key: [example[key] for example in processed_features] for key in processed_features[0].keys()
}
        # The model's main input name, usually `input_values`, has to be passed for padding
if self.model_input_names[0] not in processed_features:
raise ValueError(
"You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`"
F""" to this method that includes {self.model_input_names[0]}, but you provided"""
F""" {list(processed_features.keys() )}""" )
UpperCAmelCase_ : Tuple = processed_features[self.model_input_names[0]]
UpperCAmelCase_ : List[str] = (
return_attention_mask if return_attention_mask is not None else self.return_attention_mask
)
if len(lowercase_ ) == 0:
if return_attention_mask:
UpperCAmelCase_ : Union[str, Any] = []
return processed_features
# If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
# and rebuild them afterwards if no return_tensors is specified
# Note that we lose the specific device the tensor may be on for PyTorch
UpperCAmelCase_ : List[str] = required_input[0]
if isinstance(lowercase_ , (list, tuple) ):
            # first_element might be an empty list/tuple in some edge cases, so we grab the first non-empty element.
UpperCAmelCase_ : Any = 0
while len(required_input[index] ) == 0:
index += 1
if index < len(lowercase_ ):
UpperCAmelCase_ : Optional[Any] = required_input[index][0]
if return_tensors is None:
if is_tf_tensor(lowercase_ ):
UpperCAmelCase_ : Dict = "tf"
elif is_torch_tensor(lowercase_ ):
UpperCAmelCase_ : Any = "pt"
elif isinstance(lowercase_ , (int, float, list, tuple, np.ndarray) ):
UpperCAmelCase_ : str = "np"
else:
raise ValueError(
F"""type of {first_element} unknown: {type(lowercase_ )}. """
"Should be one of a python, numpy, pytorch or tensorflow object." )
for key, value in processed_features.items():
if isinstance(value[0] , (int, float) ):
UpperCAmelCase_ : Optional[int] = to_numpy(lowercase_ )
else:
UpperCAmelCase_ : List[str] = [to_numpy(lowercase_ ) for v in value]
# Convert padding_strategy in PaddingStrategy
UpperCAmelCase_ : Dict = self._get_padding_strategies(padding=lowercase_ , max_length=lowercase_ )
UpperCAmelCase_ : str = processed_features[self.model_input_names[0]]
UpperCAmelCase_ : int = len(lowercase_ )
if not all(len(lowercase_ ) == batch_size for v in processed_features.values() ):
raise ValueError("Some items in the output dictionary have a different batch size than others." )
UpperCAmelCase_ : int = []
for i in range(lowercase_ ):
UpperCAmelCase_ : str = {k: v[i] for k, v in processed_features.items()}
# truncation
UpperCAmelCase_ : List[str] = self._truncate(
lowercase_ , max_length=lowercase_ , pad_to_multiple_of=lowercase_ , truncation=lowercase_ , )
truncated_inputs.append(lowercase_ )
if padding_strategy == PaddingStrategy.LONGEST:
# make sure that `max_length` cannot be longer than the longest truncated length
UpperCAmelCase_ : str = max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs )
UpperCAmelCase_ : Dict = PaddingStrategy.MAX_LENGTH
UpperCAmelCase_ : List[str] = {}
for i in range(lowercase_ ):
# padding
UpperCAmelCase_ : int = self._pad(
truncated_inputs[i] , max_length=lowercase_ , padding_strategy=lowercase_ , pad_to_multiple_of=lowercase_ , return_attention_mask=lowercase_ , )
for key, value in outputs.items():
if key not in batch_outputs:
UpperCAmelCase_ : Any = []
                if value.dtype is np.dtype(np.float64 ):
                    UpperCAmelCase_ : List[Any] = value.astype(np.float32 )
batch_outputs[key].append(lowercase_ )
return BatchFeature(lowercase_ , tensor_type=lowercase_ )
def UpperCamelCase__ ( self , lowercase_ , lowercase_ = None , lowercase_ = PaddingStrategy.DO_NOT_PAD , lowercase_ = None , lowercase_ = None , ):
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = processed_features[self.model_input_names[0]]
if padding_strategy == PaddingStrategy.LONGEST:
UpperCAmelCase_ : Tuple = len(lowercase_ )
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
UpperCAmelCase_ : Tuple = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
UpperCAmelCase_ : Dict = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(lowercase_ ) < max_length
if return_attention_mask and "attention_mask" not in processed_features:
            UpperCAmelCase_ : Optional[int] = np.ones(len(lowercase_ ) , dtype=np.int32 )
if needs_to_be_padded:
UpperCAmelCase_ : Dict = max_length - len(lowercase_ )
if self.padding_side == "right":
if return_attention_mask:
UpperCAmelCase_ : List[Any] = np.pad(
processed_features["attention_mask"] , (0, difference) )
UpperCAmelCase_ : Dict = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
UpperCAmelCase_ : Optional[Any] = np.pad(
lowercase_ , lowercase_ , "constant" , constant_values=self.padding_value )
elif self.padding_side == "left":
if return_attention_mask:
UpperCAmelCase_ : Optional[Any] = np.pad(
processed_features["attention_mask"] , (difference, 0) )
UpperCAmelCase_ : Dict = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
UpperCAmelCase_ : str = np.pad(
lowercase_ , lowercase_ , "constant" , constant_values=self.padding_value )
else:
raise ValueError("Invalid padding strategy:" + str(self.padding_side ) )
return processed_features
def UpperCamelCase__ ( self , lowercase_ , lowercase_ = None , lowercase_ = None , lowercase_ = None , ):
"""simple docstring"""
if not truncation:
return processed_features
elif truncation and max_length is None:
raise ValueError("When setting ``truncation=True``, make sure that ``max_length`` is defined." )
UpperCAmelCase_ : Optional[int] = processed_features[self.model_input_names[0]]
# find `max_length` that fits `pad_to_multiple_of`
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
UpperCAmelCase_ : Union[str, Any] = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
UpperCAmelCase_ : Optional[Any] = len(lowercase_ ) > max_length
if needs_to_be_truncated:
UpperCAmelCase_ : int = processed_features[self.model_input_names[0]][:max_length]
if "attention_mask" in processed_features:
UpperCAmelCase_ : Dict = processed_features["attention_mask"][:max_length]
return processed_features
def UpperCamelCase__ ( self , lowercase_=False , lowercase_=None ):
"""simple docstring"""
# Get padding strategy
if padding is not False:
if padding is True:
UpperCAmelCase_ : Dict = PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch
elif not isinstance(lowercase_ , lowercase_ ):
UpperCAmelCase_ : Optional[Any] = PaddingStrategy(lowercase_ )
elif isinstance(lowercase_ , lowercase_ ):
UpperCAmelCase_ : int = padding
else:
UpperCAmelCase_ : str = PaddingStrategy.DO_NOT_PAD
# Set max length if needed
if max_length is None:
if padding_strategy == PaddingStrategy.MAX_LENGTH:
raise ValueError(
F"""When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined""" )
# Test if we have a padding value
if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
raise ValueError(
"Asking to pad but the feature_extractor does not have a padding value. Please select a value to use"
" as `padding_value`. For example: `feature_extractor.padding_value = 0.0`." )
return padding_strategy
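# A hedged usage sketch of the padding entry point defined above, exercised via
# the concrete `Wav2Vec2FeatureExtractor` subclass from `transformers` (an
# assumption: any SequenceFeatureExtractor subclass would behave the same way).
def _padding_demo():
    import numpy as np
    from transformers import Wav2Vec2FeatureExtractor

    fe = Wav2Vec2FeatureExtractor(feature_size=1, sampling_rate=16000, padding_value=0.0)
    batch = fe.pad(
        {"input_values": [np.zeros(8), np.zeros(5)]},
        padding="longest",
        return_attention_mask=True,
        return_tensors="np",
    )
    # Both sequences are right-padded to length 8; the mask marks real samples.
    assert batch["input_values"].shape == (2, 8)
    assert batch["attention_mask"].sum() == 13
    return batch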
| 356
|
"""simple docstring"""
def __a ( graph, s, t, parent ):
    # Return True if there is an augmenting path from the source to the sink.
    UpperCAmelCase_ : List[Any] = [False] * len(graph )
    UpperCAmelCase_ : Any = []
    queue.append(s )
    UpperCAmelCase_ : Tuple = True
while queue:
UpperCAmelCase_ : str = queue.pop(0 )
for ind in range(len(graph[u] ) ):
if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind )
UpperCAmelCase_ : Any = True
UpperCAmelCase_ : Union[str, Any] = u
return visited[t]
def __a ( graph, source, sink ):
    # This array, filled in by BFS, stores the augmenting path as parent pointers.
    UpperCAmelCase_ : List[str] = [-1] * (len(graph ))
    UpperCAmelCase_ : Any = 0
    while bfs(graph, source, sink, parent ):
        UpperCAmelCase_ : int = float("Inf" )
        UpperCAmelCase_ : Tuple = sink
        while s != source:
            # Find the minimum residual capacity along the selected augmenting path.
            UpperCAmelCase_ : Tuple = min(path_flow, graph[parent[s]][s] )
UpperCAmelCase_ : Dict = parent[s]
max_flow += path_flow
UpperCAmelCase_ : Optional[Any] = sink
while v != source:
UpperCAmelCase_ : List[str] = parent[v]
graph[u][v] -= path_flow
graph[v][u] += path_flow
UpperCAmelCase_ : Optional[int] = parent[v]
return max_flow
_a = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
_a , _a = 0, 5
print(ford_fulkerson(graph, source, sink))
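# A second sanity check on a smaller network, reusing the `ford_fulkerson`
# entry point invoked just above. Note that the implementation consumes residual
# capacity in place, so each call needs a fresh capacity matrix.
# Edges: 0->1 (cap 3), 0->2 (cap 2), 1->3 (cap 2), 2->3 (cap 3); max flow is 4.
_small_graph = [
    [0, 3, 2, 0],
    [0, 0, 0, 2],
    [0, 0, 0, 3],
    [0, 0, 0, 0],
]
assert ford_fulkerson(_small_graph, 0, 3) == 4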
| 23
| 0
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_a = logging.get_logger(__name__)
_a = {
'microsoft/trocr-base-handwritten': (
'https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json'
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class A_ (lowercase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict = """trocr"""
SCREAMING_SNAKE_CASE__ : str = ["""past_key_values"""]
SCREAMING_SNAKE_CASE__ : str = {
"""num_attention_heads""": """decoder_attention_heads""",
"""hidden_size""": """d_model""",
"""num_hidden_layers""": """decoder_layers""",
}
def __init__( self , lowercase_=5_0265 , lowercase_=1024 , lowercase_=12 , lowercase_=16 , lowercase_=4096 , lowercase_="gelu" , lowercase_=512 , lowercase_=0.1 , lowercase_=0.0 , lowercase_=0.0 , lowercase_=2 , lowercase_=0.02 , lowercase_=0.0 , lowercase_=True , lowercase_=False , lowercase_=True , lowercase_=True , lowercase_=1 , lowercase_=0 , lowercase_=2 , **lowercase_ , ):
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = vocab_size
UpperCAmelCase_ : Optional[Any] = d_model
UpperCAmelCase_ : Optional[int] = decoder_layers
UpperCAmelCase_ : Any = decoder_attention_heads
UpperCAmelCase_ : Optional[int] = decoder_ffn_dim
UpperCAmelCase_ : Any = activation_function
UpperCAmelCase_ : int = max_position_embeddings
UpperCAmelCase_ : Union[str, Any] = dropout
UpperCAmelCase_ : List[Any] = attention_dropout
UpperCAmelCase_ : List[Any] = activation_dropout
UpperCAmelCase_ : List[str] = init_std
UpperCAmelCase_ : Tuple = decoder_layerdrop
UpperCAmelCase_ : Any = use_cache
UpperCAmelCase_ : Union[str, Any] = scale_embedding
UpperCAmelCase_ : Dict = use_learned_position_embeddings
UpperCAmelCase_ : List[str] = layernorm_embedding
super().__init__(
pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , decoder_start_token_id=lowercase_ , **lowercase_ , )
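# A round-trip sketch using the upstream `transformers.TrOCRConfig` that the
# class above mirrors (an assumption based on the `"trocr"` model type):
# configs serialize to plain dicts and rebuild losslessly.
def _trocr_config_demo():
    from transformers import TrOCRConfig

    config = TrOCRConfig(d_model=256, decoder_layers=4, decoder_attention_heads=8)
    rebuilt = TrOCRConfig(**config.to_dict())
    # `num_hidden_layers` resolves to `decoder_layers` through the attribute map.
    assert rebuilt.d_model == 256 and rebuilt.num_hidden_layers == 4
    return rebuilt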
| 357
|
"""simple docstring"""
import datasets
_a = '\\n@InProceedings{conneau2018xnli,\n author = "Conneau, Alexis\n and Rinott, Ruty\n and Lample, Guillaume\n and Williams, Adina\n and Bowman, Samuel R.\n and Schwenk, Holger\n and Stoyanov, Veselin",\n title = "XNLI: Evaluating Cross-lingual Sentence Representations",\n booktitle = "Proceedings of the 2018 Conference on Empirical Methods\n in Natural Language Processing",\n year = "2018",\n publisher = "Association for Computational Linguistics",\n location = "Brussels, Belgium",\n}\n'
_a = '\\nXNLI is a subset of a few thousand examples from MNLI which has been translated\ninto a 14 different languages (some low-ish resource). As with MNLI, the goal is\nto predict textual entailment (does sentence A imply/contradict/neither sentence\nB) and is a classification task (given two sentences, predict one of three\nlabels).\n'
_a = '\nComputes XNLI score which is just simple accuracy.\nArgs:\n predictions: Predicted labels.\n references: Ground truth labels.\nReturns:\n \'accuracy\': accuracy\nExamples:\n\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> xnli_metric = datasets.load_metric("xnli")\n >>> results = xnli_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n'
def __a ( preds, labels ):
return (preds == labels).mean()
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class A_ (datasets.Metric ):
'''simple docstring'''
def UpperCamelCase__ ( self ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("int64" if self.config_name != "sts-b" else "float32" ),
"references": datasets.Value("int64" if self.config_name != "sts-b" else "float32" ),
} ) , codebase_urls=[] , reference_urls=[] , format="numpy" , )
def UpperCamelCase__ ( self , lowercase_ , lowercase_ ):
"""simple docstring"""
return {"accuracy": simple_accuracy(lowercase_ , lowercase_ )}
| 23
| 0
|
"""simple docstring"""
import os
from datetime import datetime as dt
from github import Github
_a = [
'good first issue',
'feature request',
'wip',
]
def __a ( ):
UpperCAmelCase_ : Optional[Any] = Github(os.environ["GITHUB_TOKEN"] )
UpperCAmelCase_ : List[Any] = g.get_repo("huggingface/accelerate" )
UpperCAmelCase_ : Union[str, Any] = repo.get_issues(state="open" )
for issue in open_issues:
        UpperCAmelCase_ : Union[str, Any] = sorted([comment for comment in issue.get_comments()], key=lambda i : i.created_at, reverse=True )
UpperCAmelCase_ : Any = comments[0] if len(__lowerCamelCase ) > 0 else None
UpperCAmelCase_ : Optional[Any] = dt.utcnow()
UpperCAmelCase_ : Optional[int] = (current_time - issue.updated_at).days
UpperCAmelCase_ : str = (current_time - issue.created_at).days
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and days_since_updated > 7
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
            # Close the issue, since there have been 7 days of inactivity since the bot's mention.
issue.edit(state="closed" )
elif (
days_since_updated > 23
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Add stale comment
issue.create_comment(
"This issue has been automatically marked as stale because it has not had "
"recent activity. If you think this still needs to be addressed "
"please comment on this thread.\n\nPlease note that issues that do not follow the "
"[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) "
"are likely to be ignored." )
if __name__ == "__main__":
main()
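# A pure-function restatement of the two staleness rules applied above (a
# hypothetical helper, handy for unit-testing the thresholds without the
# GitHub API):
def _staleness_action(days_since_updated, days_since_creation, last_comment_by_bot, exempt):
    if exempt or days_since_creation < 30:
        return "keep"
    if last_comment_by_bot and days_since_updated > 7:
        return "close"
    if days_since_updated > 23:
        return "stale"
    return "keep"

assert _staleness_action(10, 40, True, False) == "close"
assert _staleness_action(24, 40, False, False) == "stale"
assert _staleness_action(5, 40, False, False) == "keep"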
| 358
|
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
_a = logging.get_logger(__name__)
class A_ (lowercase__ ):
'''simple docstring'''
def __init__( self , lowercase_ , lowercase_ , lowercase_ , **lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = feature_size
UpperCAmelCase_ : Any = sampling_rate
UpperCAmelCase_ : Any = padding_value
UpperCAmelCase_ : str = kwargs.pop("padding_side" , "right" )
UpperCAmelCase_ : List[str] = kwargs.pop("return_attention_mask" , lowercase_ )
super().__init__(**lowercase_ )
def UpperCamelCase__ ( self , lowercase_ , lowercase_ = True , lowercase_ = None , lowercase_ = False , lowercase_ = None , lowercase_ = None , lowercase_ = None , ):
"""simple docstring"""
        # If we have a list of dicts, let's convert it into a dict of lists
        # We do this to allow using this method as a collate_fn function in a PyTorch DataLoader
if isinstance(lowercase_ , (list, tuple) ) and isinstance(processed_features[0] , (dict, BatchFeature) ):
UpperCAmelCase_ : Dict = {
key: [example[key] for example in processed_features] for key in processed_features[0].keys()
}
        # The model's main input name, usually `input_values`, has to be passed for padding
if self.model_input_names[0] not in processed_features:
raise ValueError(
"You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`"
F""" to this method that includes {self.model_input_names[0]}, but you provided"""
F""" {list(processed_features.keys() )}""" )
UpperCAmelCase_ : Tuple = processed_features[self.model_input_names[0]]
UpperCAmelCase_ : List[str] = (
return_attention_mask if return_attention_mask is not None else self.return_attention_mask
)
if len(lowercase_ ) == 0:
if return_attention_mask:
UpperCAmelCase_ : Union[str, Any] = []
return processed_features
# If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
# and rebuild them afterwards if no return_tensors is specified
# Note that we lose the specific device the tensor may be on for PyTorch
UpperCAmelCase_ : List[str] = required_input[0]
if isinstance(lowercase_ , (list, tuple) ):
            # first_element might be an empty list/tuple in some edge cases, so we grab the first non-empty element.
UpperCAmelCase_ : Any = 0
while len(required_input[index] ) == 0:
index += 1
if index < len(lowercase_ ):
UpperCAmelCase_ : Optional[Any] = required_input[index][0]
if return_tensors is None:
if is_tf_tensor(lowercase_ ):
UpperCAmelCase_ : Dict = "tf"
elif is_torch_tensor(lowercase_ ):
UpperCAmelCase_ : Any = "pt"
elif isinstance(lowercase_ , (int, float, list, tuple, np.ndarray) ):
UpperCAmelCase_ : str = "np"
else:
raise ValueError(
F"""type of {first_element} unknown: {type(lowercase_ )}. """
"Should be one of a python, numpy, pytorch or tensorflow object." )
for key, value in processed_features.items():
if isinstance(value[0] , (int, float) ):
UpperCAmelCase_ : Optional[int] = to_numpy(lowercase_ )
else:
UpperCAmelCase_ : List[str] = [to_numpy(lowercase_ ) for v in value]
# Convert padding_strategy in PaddingStrategy
UpperCAmelCase_ : Dict = self._get_padding_strategies(padding=lowercase_ , max_length=lowercase_ )
UpperCAmelCase_ : str = processed_features[self.model_input_names[0]]
UpperCAmelCase_ : int = len(lowercase_ )
if not all(len(lowercase_ ) == batch_size for v in processed_features.values() ):
raise ValueError("Some items in the output dictionary have a different batch size than others." )
UpperCAmelCase_ : int = []
for i in range(lowercase_ ):
UpperCAmelCase_ : str = {k: v[i] for k, v in processed_features.items()}
# truncation
UpperCAmelCase_ : List[str] = self._truncate(
lowercase_ , max_length=lowercase_ , pad_to_multiple_of=lowercase_ , truncation=lowercase_ , )
truncated_inputs.append(lowercase_ )
if padding_strategy == PaddingStrategy.LONGEST:
# make sure that `max_length` cannot be longer than the longest truncated length
UpperCAmelCase_ : str = max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs )
UpperCAmelCase_ : Dict = PaddingStrategy.MAX_LENGTH
UpperCAmelCase_ : List[str] = {}
for i in range(lowercase_ ):
# padding
UpperCAmelCase_ : int = self._pad(
truncated_inputs[i] , max_length=lowercase_ , padding_strategy=lowercase_ , pad_to_multiple_of=lowercase_ , return_attention_mask=lowercase_ , )
for key, value in outputs.items():
if key not in batch_outputs:
UpperCAmelCase_ : Any = []
                if value.dtype is np.dtype(np.float64 ):
                    UpperCAmelCase_ : List[Any] = value.astype(np.float32 )
batch_outputs[key].append(lowercase_ )
return BatchFeature(lowercase_ , tensor_type=lowercase_ )
def UpperCamelCase__ ( self , lowercase_ , lowercase_ = None , lowercase_ = PaddingStrategy.DO_NOT_PAD , lowercase_ = None , lowercase_ = None , ):
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = processed_features[self.model_input_names[0]]
if padding_strategy == PaddingStrategy.LONGEST:
UpperCAmelCase_ : Tuple = len(lowercase_ )
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
UpperCAmelCase_ : Tuple = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
UpperCAmelCase_ : Dict = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(lowercase_ ) < max_length
if return_attention_mask and "attention_mask" not in processed_features:
            UpperCAmelCase_ : Optional[int] = np.ones(len(lowercase_ ) , dtype=np.int32 )
if needs_to_be_padded:
UpperCAmelCase_ : Dict = max_length - len(lowercase_ )
if self.padding_side == "right":
if return_attention_mask:
UpperCAmelCase_ : List[Any] = np.pad(
processed_features["attention_mask"] , (0, difference) )
UpperCAmelCase_ : Dict = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
UpperCAmelCase_ : Optional[Any] = np.pad(
lowercase_ , lowercase_ , "constant" , constant_values=self.padding_value )
elif self.padding_side == "left":
if return_attention_mask:
UpperCAmelCase_ : Optional[Any] = np.pad(
processed_features["attention_mask"] , (difference, 0) )
UpperCAmelCase_ : Dict = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
UpperCAmelCase_ : str = np.pad(
lowercase_ , lowercase_ , "constant" , constant_values=self.padding_value )
else:
raise ValueError("Invalid padding strategy:" + str(self.padding_side ) )
return processed_features
def UpperCamelCase__ ( self , lowercase_ , lowercase_ = None , lowercase_ = None , lowercase_ = None , ):
"""simple docstring"""
if not truncation:
return processed_features
elif truncation and max_length is None:
raise ValueError("When setting ``truncation=True``, make sure that ``max_length`` is defined." )
UpperCAmelCase_ : Optional[int] = processed_features[self.model_input_names[0]]
# find `max_length` that fits `pad_to_multiple_of`
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
UpperCAmelCase_ : Union[str, Any] = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
UpperCAmelCase_ : Optional[Any] = len(lowercase_ ) > max_length
if needs_to_be_truncated:
UpperCAmelCase_ : int = processed_features[self.model_input_names[0]][:max_length]
if "attention_mask" in processed_features:
UpperCAmelCase_ : Dict = processed_features["attention_mask"][:max_length]
return processed_features
def UpperCamelCase__ ( self , lowercase_=False , lowercase_=None ):
"""simple docstring"""
# Get padding strategy
if padding is not False:
if padding is True:
UpperCAmelCase_ : Dict = PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch
elif not isinstance(lowercase_ , lowercase_ ):
UpperCAmelCase_ : Optional[Any] = PaddingStrategy(lowercase_ )
elif isinstance(lowercase_ , lowercase_ ):
UpperCAmelCase_ : int = padding
else:
UpperCAmelCase_ : str = PaddingStrategy.DO_NOT_PAD
# Set max length if needed
if max_length is None:
if padding_strategy == PaddingStrategy.MAX_LENGTH:
raise ValueError(
F"""When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined""" )
# Test if we have a padding value
if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
raise ValueError(
"Asking to pad but the feature_extractor does not have a padding value. Please select a value to use"
" as `padding_value`. For example: `feature_extractor.padding_value = 0.0`." )
return padding_strategy
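# A sketch of the truncation path through the `pad` entry point above, again
# via the concrete `Wav2Vec2FeatureExtractor` subclass (an assumption; any
# SequenceFeatureExtractor subclass works the same way).
def _truncation_demo():
    import numpy as np
    from transformers import Wav2Vec2FeatureExtractor

    fe = Wav2Vec2FeatureExtractor(feature_size=1, sampling_rate=16000, padding_value=0.0)
    batch = fe.pad(
        {"input_values": [np.zeros(10), np.zeros(3)]},
        padding="max_length",
        max_length=6,
        truncation=True,
        return_tensors="np",
    )
    # The long sequence is cut down to 6 samples, the short one padded up to 6.
    assert batch["input_values"].shape == (2, 6)
    return batch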
| 23
| 0
|
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImgaImgPipeline, UNetaDConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class A_ (lowercase__ ,lowercase__ ,lowercase__ ,unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[Any] = StableUnCLIPImgaImgPipeline
SCREAMING_SNAKE_CASE__ : int = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
SCREAMING_SNAKE_CASE__ : int = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
SCREAMING_SNAKE_CASE__ : Tuple = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
SCREAMING_SNAKE_CASE__ : Optional[Any] = frozenset([] )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : str = 32
UpperCAmelCase_ : Dict = embedder_hidden_size
# image encoding components
UpperCAmelCase_ : List[str] = CLIPImageProcessor(crop_size=32 , size=32 )
torch.manual_seed(0 )
UpperCAmelCase_ : str = CLIPVisionModelWithProjection(
CLIPVisionConfig(
hidden_size=lowercase_ , projection_dim=lowercase_ , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , ) )
# regular denoising components
torch.manual_seed(0 )
UpperCAmelCase_ : Any = StableUnCLIPImageNormalizer(embedding_dim=lowercase_ )
UpperCAmelCase_ : int = DDPMScheduler(beta_schedule="squaredcos_cap_v2" )
torch.manual_seed(0 )
UpperCAmelCase_ : Dict = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
torch.manual_seed(0 )
UpperCAmelCase_ : Union[str, Any] = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=lowercase_ , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
torch.manual_seed(0 )
UpperCAmelCase_ : Any = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="projection" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=lowercase_ , layers_per_block=1 , upcast_attention=lowercase_ , use_linear_projection=lowercase_ , )
torch.manual_seed(0 )
UpperCAmelCase_ : int = DDIMScheduler(
beta_schedule="scaled_linear" , beta_start=0.0_00_85 , beta_end=0.0_12 , prediction_type="v_prediction" , set_alpha_to_one=lowercase_ , steps_offset=1 , )
torch.manual_seed(0 )
UpperCAmelCase_ : Optional[Any] = AutoencoderKL()
UpperCAmelCase_ : List[Any] = {
# image encoding components
"feature_extractor": feature_extractor,
"image_encoder": image_encoder.eval(),
# image noising components
"image_normalizer": image_normalizer.eval(),
"image_noising_scheduler": image_noising_scheduler,
# regular denoising components
"tokenizer": tokenizer,
"text_encoder": text_encoder.eval(),
"unet": unet.eval(),
"scheduler": scheduler,
"vae": vae.eval(),
}
return components
def UpperCamelCase__ ( self , lowercase_ , lowercase_=0 , lowercase_=True ):
"""simple docstring"""
if str(lowercase_ ).startswith("mps" ):
UpperCAmelCase_ : Optional[int] = torch.manual_seed(lowercase_ )
else:
UpperCAmelCase_ : Optional[int] = torch.Generator(device=lowercase_ ).manual_seed(lowercase_ )
UpperCAmelCase_ : int = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowercase_ ) ).to(lowercase_ )
if pil_image:
UpperCAmelCase_ : Any = input_image * 0.5 + 0.5
UpperCAmelCase_ : str = input_image.clamp(0 , 1 )
UpperCAmelCase_ : Dict = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
UpperCAmelCase_ : Optional[Any] = DiffusionPipeline.numpy_to_pil(lowercase_ )[0]
return {
"prompt": "An anime racoon running a marathon",
"image": input_image,
"generator": generator,
"num_inference_steps": 2,
"output_type": "np",
}
@skip_mps
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Any = "cpu" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ : Optional[Any] = self.get_dummy_components()
UpperCAmelCase_ : Union[str, Any] = StableUnCLIPImgaImgPipeline(**lowercase_ )
UpperCAmelCase_ : Tuple = sd_pipe.to(lowercase_ )
sd_pipe.set_progress_bar_config(disable=lowercase_ )
UpperCAmelCase_ : Dict = self.get_dummy_inputs(lowercase_ )
inputs.update({"image_embeds": None} )
UpperCAmelCase_ : List[str] = sd_pipe(**lowercase_ ).images
UpperCAmelCase_ : List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
UpperCAmelCase_ : Optional[int] = np.array([0.38_72, 0.72_24, 0.56_01, 0.47_41, 0.68_72, 0.58_14, 0.46_36, 0.38_67, 0.50_78] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Dict = torch_device in ["cpu", "mps"]
self._test_attention_slicing_forward_pass(test_max_difference=lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : str = torch_device in ["cpu", "mps"]
self._test_inference_batch_single_identical(test_max_difference=lowercase_ )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def UpperCamelCase__ ( self ):
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(test_max_difference=lowercase_ )
@slow
@require_torch_gpu
class A_ (unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase__ ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" )
UpperCAmelCase_ : Union[str, Any] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy" )
UpperCAmelCase_ : Optional[Any] = StableUnCLIPImgaImgPipeline.from_pretrained(
"fusing/stable-unclip-2-1-l-img2img" , torch_dtype=torch.floataa )
pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
UpperCAmelCase_ : Any = torch.Generator(device="cpu" ).manual_seed(0 )
UpperCAmelCase_ : Union[str, Any] = pipe(lowercase_ , "anime turle" , generator=lowercase_ , output_type="np" )
UpperCAmelCase_ : int = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(lowercase_ , lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Dict = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" )
UpperCAmelCase_ : Optional[int] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy" )
UpperCAmelCase_ : Union[str, Any] = StableUnCLIPImgaImgPipeline.from_pretrained(
"fusing/stable-unclip-2-1-h-img2img" , torch_dtype=torch.floataa )
pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
UpperCAmelCase_ : Any = torch.Generator(device="cpu" ).manual_seed(0 )
UpperCAmelCase_ : Tuple = pipe(lowercase_ , "anime turle" , generator=lowercase_ , output_type="np" )
UpperCAmelCase_ : Tuple = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(lowercase_ , lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" )
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
UpperCAmelCase_ : Union[str, Any] = StableUnCLIPImgaImgPipeline.from_pretrained(
"fusing/stable-unclip-2-1-h-img2img" , torch_dtype=torch.floataa )
UpperCAmelCase_ : str = pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
UpperCAmelCase_ : List[str] = pipe(
lowercase_ , "anime turtle" , num_inference_steps=2 , output_type="np" , )
UpperCAmelCase_ : Dict = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
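# A condensed end-to-end sketch of the slow tests above (upstream class name:
# `StableUnCLIPImg2ImgPipeline`; the checkpoint id is the one the tests load,
# and `init_image` is expected to be a PIL image such as the turtle fixture).
def _stable_unclip_img2img_demo(init_image):
    import torch
    from diffusers import StableUnCLIPImg2ImgPipeline

    pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
        "fusing/stable-unclip-2-1-h-img2img", torch_dtype=torch.float16
    )
    pipe.enable_attention_slicing()
    pipe.enable_sequential_cpu_offload()
    generator = torch.Generator(device="cpu").manual_seed(0)
    return pipe(init_image, "anime turtle", generator=generator, output_type="np").images[0]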
| 359
|
"""simple docstring"""
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class A_ (unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : str = torch.nn.Linear(10 , 10 )
UpperCAmelCase_ : List[str] = torch.optim.SGD(model.parameters() , 0.1 )
UpperCAmelCase_ : Optional[Any] = Accelerator()
UpperCAmelCase_ : Tuple = accelerator.prepare(lowercase_ )
try:
pickle.loads(pickle.dumps(lowercase_ ) )
except Exception as e:
self.fail(F"""Accelerated optimizer pickling failed with {e}""" )
AcceleratorState._reset_state()
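# The same round-trip idea applied to the prepared model as well (a sketch;
# the test above only exercises the optimizer):
def _pickle_roundtrip_demo():
    import pickle

    import torch
    from accelerate import Accelerator

    model = torch.nn.Linear(10, 10)
    optimizer = torch.optim.SGD(model.parameters(), 0.1)
    accelerator = Accelerator()
    model, optimizer = accelerator.prepare(model, optimizer)
    pickle.loads(pickle.dumps(model))
    pickle.loads(pickle.dumps(optimizer))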
| 23
| 0
|
"""simple docstring"""
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_a = logging.get_logger(__name__)
_a = '▁'
_a = {
'vocab_file': 'vocab.json',
'spm_file': 'sentencepiece.bpe.model',
}
_a = {
'vocab_file': {
'facebook/s2t-small-librispeech-asr': (
'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json'
),
},
'spm_file': {
'facebook/s2t-small-librispeech-asr': (
'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model'
)
},
}
_a = {
'facebook/s2t-small-librispeech-asr': 1_024,
}
_a = ['pt', 'fr', 'ru', 'nl', 'ro', 'it', 'es', 'de']
_a = {'mustc': MUSTC_LANGS}
class A_ (lowercase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[str] = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE__ : Any = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE__ : List[Any] = MAX_MODEL_INPUT_SIZES
SCREAMING_SNAKE_CASE__ : Dict = ["""input_ids""", """attention_mask"""]
SCREAMING_SNAKE_CASE__ : List[int] = []
def __init__( self , lowercase_ , lowercase_ , lowercase_="<s>" , lowercase_="</s>" , lowercase_="<pad>" , lowercase_="<unk>" , lowercase_=False , lowercase_=False , lowercase_=None , lowercase_=None , lowercase_ = None , **lowercase_ , ):
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=lowercase_ , eos_token=lowercase_ , unk_token=lowercase_ , pad_token=lowercase_ , do_upper_case=lowercase_ , do_lower_case=lowercase_ , tgt_lang=lowercase_ , lang_codes=lowercase_ , sp_model_kwargs=self.sp_model_kwargs , **lowercase_ , )
UpperCAmelCase_ : Optional[int] = do_upper_case
UpperCAmelCase_ : Optional[Any] = do_lower_case
UpperCAmelCase_ : Optional[int] = load_json(lowercase_ )
UpperCAmelCase_ : Any = {v: k for k, v in self.encoder.items()}
UpperCAmelCase_ : Dict = spm_file
UpperCAmelCase_ : Dict = load_spm(lowercase_ , self.sp_model_kwargs )
if lang_codes is not None:
UpperCAmelCase_ : str = lang_codes
UpperCAmelCase_ : str = LANGUAGES[lang_codes]
UpperCAmelCase_ : List[Any] = [F"""<lang:{lang}>""" for lang in self.langs]
UpperCAmelCase_ : int = {lang: self.sp_model.PieceToId(F"""<lang:{lang}>""" ) for lang in self.langs}
UpperCAmelCase_ : Dict = self.lang_tokens
UpperCAmelCase_ : Union[str, Any] = tgt_lang if tgt_lang is not None else self.langs[0]
self.set_tgt_lang_special_tokens(self._tgt_lang )
else:
UpperCAmelCase_ : Optional[int] = {}
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return len(self.encoder )
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return self._tgt_lang
@tgt_lang.setter
def UpperCamelCase__ ( self , lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = new_tgt_lang
self.set_tgt_lang_special_tokens(lowercase_ )
def UpperCamelCase__ ( self , lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : str = self.lang_code_to_id[tgt_lang]
UpperCAmelCase_ : List[Any] = [lang_code_id]
def UpperCamelCase__ ( self , lowercase_ ):
"""simple docstring"""
return self.sp_model.encode(lowercase_ , out_type=lowercase_ )
def UpperCamelCase__ ( self , lowercase_ ):
"""simple docstring"""
return self.encoder.get(lowercase_ , self.encoder[self.unk_token] )
def UpperCamelCase__ ( self , lowercase_ ):
"""simple docstring"""
return self.decoder.get(lowercase_ , self.unk_token )
def UpperCamelCase__ ( self , lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : Dict = []
UpperCAmelCase_ : List[str] = ""
for token in tokens:
            # make sure that special tokens are not decoded using the sentencepiece model
if token in self.all_special_tokens:
UpperCAmelCase_ : Any = self.sp_model.decode(lowercase_ )
out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
UpperCAmelCase_ : str = []
else:
current_sub_tokens.append(lowercase_ )
UpperCAmelCase_ : Union[str, Any] = self.sp_model.decode(lowercase_ )
out_string += decoded.upper() if self.do_upper_case else decoded
return out_string.strip()
def UpperCamelCase__ ( self , lowercase_ , lowercase_=None ):
"""simple docstring"""
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + [self.eos_token_id]
def UpperCamelCase__ ( self , lowercase_ , lowercase_ = None , lowercase_ = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowercase_ , token_ids_a=lowercase_ , already_has_special_tokens=lowercase_ )
UpperCAmelCase_ : Optional[int] = [1] * len(self.prefix_tokens )
UpperCAmelCase_ : Any = [1]
if token_ids_a is None:
return prefix_ones + ([0] * len(lowercase_ )) + suffix_ones
return prefix_ones + ([0] * len(lowercase_ )) + ([0] * len(lowercase_ )) + suffix_ones
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : str = self.encoder.copy()
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
"""simple docstring"""
UpperCAmelCase_ : str = self.__dict__.copy()
UpperCAmelCase_ : Dict = None
return state
def __setstate__( self , lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
UpperCAmelCase_ : int = {}
UpperCAmelCase_ : int = load_spm(self.spm_file , self.sp_model_kwargs )
def UpperCamelCase__ ( self , lowercase_ , lowercase_ = None ):
"""simple docstring"""
UpperCAmelCase_ : int = Path(lowercase_ )
assert save_dir.is_dir(), F"""{save_directory} should be a directory"""
UpperCAmelCase_ : Optional[int] = save_dir / (
(filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
)
UpperCAmelCase_ : List[Any] = save_dir / (
(filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
)
save_json(self.encoder , lowercase_ )
if os.path.abspath(self.spm_file ) != os.path.abspath(lowercase_ ) and os.path.isfile(self.spm_file ):
copyfile(self.spm_file , lowercase_ )
elif not os.path.isfile(self.spm_file ):
with open(lowercase_ , "wb" ) as fi:
UpperCAmelCase_ : Any = self.sp_model.serialized_model_proto()
fi.write(lowercase_ )
return (str(lowercase_ ), str(lowercase_ ))
def __a ( path, sp_model_kwargs ):
    UpperCAmelCase_ : List[str] = sentencepiece.SentencePieceProcessor(**sp_model_kwargs )
    spm.Load(str(path ) )
return spm
def __a ( __lowerCamelCase ):
with open(__lowerCamelCase, "r" ) as f:
return json.load(__lowerCamelCase )
def __a ( data, path ):
    with open(path, "w" ) as f:
        json.dump(data, f, indent=2 )
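# A minimal encode/decode round trip using the upstream name of the class above
# (`transformers.Speech2TextTokenizer`, an assumption based on the vocab map)
# and the checkpoint it references:
def _s2t_tokenizer_demo():
    from transformers import Speech2TextTokenizer

    tok = Speech2TextTokenizer.from_pretrained("facebook/s2t-small-librispeech-asr")
    ids = tok("hello world").input_ids
    # Decoding upper-cases the text when `do_upper_case` is set on the checkpoint.
    return tok.decode(ids, skip_special_tokens=True)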
| 360
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_a = logging.get_logger(__name__)
_a = {'ctrl': 'https://huggingface.co/ctrl/resolve/main/config.json'}
class A_ (lowercase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict = """ctrl"""
SCREAMING_SNAKE_CASE__ : Optional[int] = ["""past_key_values"""]
SCREAMING_SNAKE_CASE__ : List[str] = {
"""max_position_embeddings""": """n_positions""",
"""hidden_size""": """n_embd""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self , lowercase_=24_6534 , lowercase_=256 , lowercase_=1280 , lowercase_=8192 , lowercase_=48 , lowercase_=16 , lowercase_=0.1 , lowercase_=0.1 , lowercase_=1E-6 , lowercase_=0.02 , lowercase_=True , **lowercase_ , ):
"""simple docstring"""
UpperCAmelCase_ : Tuple = vocab_size
UpperCAmelCase_ : Union[str, Any] = n_positions
UpperCAmelCase_ : List[str] = n_embd
UpperCAmelCase_ : Dict = n_layer
UpperCAmelCase_ : Optional[int] = n_head
UpperCAmelCase_ : List[str] = dff
UpperCAmelCase_ : Tuple = resid_pdrop
UpperCAmelCase_ : Optional[Any] = embd_pdrop
UpperCAmelCase_ : str = layer_norm_epsilon
UpperCAmelCase_ : List[str] = initializer_range
UpperCAmelCase_ : List[str] = use_cache
super().__init__(**lowercase_ )
| 23
| 0
|
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
_a = logging.get_logger(__name__)
_a = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
_a = {
'vocab_file': {
'junnyu/roformer_chinese_small': 'https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt',
'junnyu/roformer_chinese_base': 'https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt',
'junnyu/roformer_chinese_char_small': (
'https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt'
),
'junnyu/roformer_chinese_char_base': (
'https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt'
),
'junnyu/roformer_small_discriminator': (
'https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt'
),
'junnyu/roformer_small_generator': (
'https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt'
),
}
}
_a = {
'junnyu/roformer_chinese_small': 1_536,
'junnyu/roformer_chinese_base': 1_536,
'junnyu/roformer_chinese_char_small': 512,
'junnyu/roformer_chinese_char_base': 512,
'junnyu/roformer_small_discriminator': 128,
'junnyu/roformer_small_generator': 128,
}
_a = {
'junnyu/roformer_chinese_small': {'do_lower_case': True},
'junnyu/roformer_chinese_base': {'do_lower_case': True},
'junnyu/roformer_chinese_char_small': {'do_lower_case': True},
'junnyu/roformer_chinese_char_base': {'do_lower_case': True},
'junnyu/roformer_small_discriminator': {'do_lower_case': True},
'junnyu/roformer_small_generator': {'do_lower_case': True},
}
class A_ (lowercase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[Any] = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE__ : List[str] = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE__ : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE__ : Any = PRETRAINED_INIT_CONFIGURATION
SCREAMING_SNAKE_CASE__ : List[str] = RoFormerTokenizer
def __init__( self , lowercase_=None , lowercase_=None , lowercase_=True , lowercase_="[UNK]" , lowercase_="[SEP]" , lowercase_="[PAD]" , lowercase_="[CLS]" , lowercase_="[MASK]" , lowercase_=True , lowercase_=None , **lowercase_ , ):
"""simple docstring"""
super().__init__(
lowercase_ , tokenizer_file=lowercase_ , do_lower_case=lowercase_ , unk_token=lowercase_ , sep_token=lowercase_ , pad_token=lowercase_ , cls_token=lowercase_ , mask_token=lowercase_ , tokenize_chinese_chars=lowercase_ , strip_accents=lowercase_ , **lowercase_ , )
UpperCAmelCase_ : Any = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
pre_tok_state.get("lowercase" , lowercase_ ) != do_lower_case
or pre_tok_state.get("strip_accents" , lowercase_ ) != strip_accents
):
UpperCAmelCase_ : List[str] = getattr(lowercase_ , pre_tok_state.pop("type" ) )
UpperCAmelCase_ : Union[str, Any] = do_lower_case
UpperCAmelCase_ : Union[str, Any] = strip_accents
UpperCAmelCase_ : int = pre_tok_class(**lowercase_ )
UpperCAmelCase_ : List[Any] = do_lower_case
def __getstate__( self ):
"""simple docstring"""
UpperCAmelCase_ : Any = self.__dict__.copy()
UpperCAmelCase_ : Tuple = BertPreTokenizer()
return state
def __setstate__( self , lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = d
UpperCAmelCase_ : Optional[Any] = self.__dict__["_tokenizer"].get_vocab()
UpperCAmelCase_ : Tuple = PreTokenizer.custom(JiebaPreTokenizer(lowercase_ ) )
def UpperCamelCase__ ( self , lowercase_ , lowercase_=None ):
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def UpperCamelCase__ ( self , lowercase_ , lowercase_ = None ):
"""simple docstring"""
UpperCAmelCase_ : int = [self.sep_token_id]
UpperCAmelCase_ : Dict = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
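        # Illustration of the mask produced above (token names assumed for the
        # example): a single sequence yields zeros over `[CLS] A [SEP]`, while
        # a pair yields zeros over `[CLS] A [SEP]` followed by ones over
        # `B [SEP]`, the usual BERT-style segment ids.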
def UpperCamelCase__ ( self , lowercase_ , lowercase_ = None ):
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = self._tokenizer.model.save(lowercase_ , name=lowercase_ )
return tuple(lowercase_ )
def UpperCamelCase__ ( self , lowercase_ , lowercase_=None , lowercase_=None , lowercase_=False , **lowercase_ , ):
"""simple docstring"""
UpperCAmelCase_ : Dict = BertPreTokenizer()
return super().save_pretrained(lowercase_ , lowercase_ , lowercase_ , lowercase_ , **lowercase_ )
| 361
|
"""simple docstring"""
def __a ( __lowerCamelCase ):
    assert isinstance(__lowerCamelCase, int ), f"""The input value of [n={number}] is not an integer"""
if number == 1:
return 2
elif number < 1:
UpperCAmelCase_ : str = f"""The input value of [n={number}] has to be > 0"""
raise ValueError(__lowerCamelCase )
else:
UpperCAmelCase_ : List[str] = sylvester(number - 1 )
UpperCAmelCase_ : List[str] = num - 1
UpperCAmelCase_ : List[str] = num
return lower * upper + 1
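# Sanity check of the recurrence implemented above: since the function returns
# s(n-1)^2 - s(n-1) + 1 with s(1) = 2, the first terms are
# s(2) = 2*1 + 1 = 3, s(3) = 3*2 + 1 = 7, s(4) = 7*6 + 1 = 43 and
# s(5) = 43*42 + 1 = 1807.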
if __name__ == "__main__":
print(f"""The 8th number in Sylvester's sequence: {sylvester(8)}""")
| 23
| 0
|
import unittest
from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
_a = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
class A_ (lowercase__ ,unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = XLMProphetNetTokenizer
SCREAMING_SNAKE_CASE__ : Dict = False
SCREAMING_SNAKE_CASE__ : Union[str, Any] = True
def UpperCamelCase__ ( self ):
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
UpperCAmelCase_ : Optional[int] = XLMProphetNetTokenizer(lowercase_ , keep_accents=lowercase_ )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = "[PAD]"
UpperCAmelCase_ : str = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowercase_ ) , lowercase_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowercase_ ) , lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "[PAD]" )
self.assertEqual(vocab_keys[1] , "[CLS]" )
self.assertEqual(vocab_keys[-1] , "j" )
self.assertEqual(len(lowercase_ ) , 1012 )
def UpperCamelCase__ ( self ):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1012 )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = XLMProphetNetTokenizer(lowercase_ , keep_accents=lowercase_ )
UpperCAmelCase_ : Dict = tokenizer.tokenize("This is a test" )
self.assertListEqual(lowercase_ , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowercase_ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
UpperCAmelCase_ : Optional[int] = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
lowercase_ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
UpperCAmelCase_ : Dict = tokenizer.convert_tokens_to_ids(lowercase_ )
self.assertListEqual(
lowercase_ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, -9, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, -9, 4]
] , )
UpperCAmelCase_ : List[Any] = tokenizer.convert_ids_to_tokens(lowercase_ )
self.assertListEqual(
lowercase_ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"[UNK]",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"[UNK]",
".",
] , )
@cached_property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return XLMProphetNetTokenizer.from_pretrained("microsoft/xprophetnet-large-wiki100-cased" )
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : int = "Hello World!"
UpperCAmelCase_ : Optional[Any] = [3_5389, 6672, 49, 2]
self.assertListEqual(lowercase_ , self.big_tokenizer.encode(lowercase_ ) )
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = {"input_ids": [[1_1073, 8_2783, 18, 26, 8_2783, 549, 5_1540, 248, 1_7209, 1301, 217, 20, 21_5186, 1325, 147, 1_7209, 1301, 217, 20, 5_6370, 53, 12_2020, 20, 1_6477, 27, 8_7355, 4548, 20, 4728, 7_8392, 17, 15_9969, 18, 26, 2_4491, 629, 15, 538, 2_2704, 5439, 15, 2788, 2_4491, 9885, 15, 4_3534, 605, 15, 814, 1_8403, 3_3200, 29, 15, 4_3534, 2_4458, 1_2410, 111, 2_4966, 8_3669, 9637, 14_4068, 26, 850, 2_2346, 27, 147, 2_4966, 8_3669, 8_3490, 26, 3_9113, 735, 27, 689, 656, 2800, 1339, 4600, 53, 12_2020, 11_5785, 34, 816, 1339, 4_6887, 18, 147, 5_3905, 1951, 4_2238, 4_1170, 1_7732, 834, 436, 15, 2_7523, 9_8733, 217, 147, 5542, 4981, 930, 1_7347, 16, 2], [2_0091, 629, 94, 8_2786, 58, 490, 20, 1528, 84, 5_3905, 344, 8_0592, 11_0128, 1_8822, 5267, 1306, 62, 15_2537, 308, 7997, 401, 12_4427, 549, 3_5442, 225, 109, 1_5055, 2_5748, 147, 7119, 4_3712, 34, 767, 13_5366, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [592, 6_3784, 11_9466, 17, 14_7808, 8_8214, 18, 656, 81, 32, 3296, 1_0280, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowercase_ , model_name="microsoft/xprophetnet-large-wiki100-cased" , revision="1acad1643ddd54a44df6a1b797ada8373685d90e" , )
| 362
|
"""simple docstring"""
import random
import unittest
import torch
from diffusers import IFImgaImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class A_ (lowercase__ ,lowercase__ ,unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[str] = IFImgaImgSuperResolutionPipeline
SCREAMING_SNAKE_CASE__ : Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""width""", """height"""}
SCREAMING_SNAKE_CASE__ : List[str] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"""original_image"""} )
SCREAMING_SNAKE_CASE__ : List[Any] = PipelineTesterMixin.required_optional_params - {"""latents"""}
def UpperCamelCase__ ( self ):
"""simple docstring"""
return self._get_superresolution_dummy_components()
def UpperCamelCase__ ( self , lowercase_ , lowercase_=0 ):
"""simple docstring"""
if str(lowercase_ ).startswith("mps" ):
UpperCAmelCase_ : Optional[Any] = torch.manual_seed(lowercase_ )
else:
UpperCAmelCase_ : Union[str, Any] = torch.Generator(device=lowercase_ ).manual_seed(lowercase_ )
UpperCAmelCase_ : Any = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowercase_ ) ).to(lowercase_ )
UpperCAmelCase_ : Optional[int] = floats_tensor((1, 3, 16, 16) , rng=random.Random(lowercase_ ) ).to(lowercase_ )
UpperCAmelCase_ : int = {
"prompt": "A painting of a squirrel eating a burger",
"image": image,
"original_image": original_image,
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def UpperCamelCase__ ( self ):
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def UpperCamelCase__ ( self ):
"""simple docstring"""
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != "cuda" , reason="float16 requires CUDA" )
def UpperCamelCase__ ( self ):
"""simple docstring"""
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1E-1 )
def UpperCamelCase__ ( self ):
"""simple docstring"""
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def UpperCamelCase__ ( self ):
"""simple docstring"""
self._test_save_load_local()
def UpperCamelCase__ ( self ):
"""simple docstring"""
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
| 23
| 0
|
"""simple docstring"""
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def __a ( __lowerCamelCase, __lowerCamelCase = True, __lowerCamelCase = math.inf, __lowerCamelCase = -math.inf, __lowerCamelCase = math.inf, __lowerCamelCase = -math.inf, __lowerCamelCase = False, __lowerCamelCase = 100, __lowerCamelCase = 0.01, __lowerCamelCase = 1, ):
UpperCAmelCase_ : List[Any] = False
UpperCAmelCase_ : Any = search_prob
UpperCAmelCase_ : Any = start_temperate
UpperCAmelCase_ : Optional[int] = []
UpperCAmelCase_ : Optional[int] = 0
UpperCAmelCase_ : Optional[int] = None
while not search_end:
UpperCAmelCase_ : int = current_state.score()
if best_state is None or current_score > best_state.score():
UpperCAmelCase_ : Union[str, Any] = current_state
scores.append(__lowerCamelCase )
iterations += 1
UpperCAmelCase_ : Optional[int] = None
UpperCAmelCase_ : Optional[int] = current_state.get_neighbors()
while (
next_state is None and neighbors
        ): # until we find a neighbor we can move to, or run out of neighbors
UpperCAmelCase_ : List[str] = random.randint(0, len(__lowerCamelCase ) - 1 ) # picking a random neighbor
UpperCAmelCase_ : List[Any] = neighbors.pop(__lowerCamelCase )
UpperCAmelCase_ : Dict = picked_neighbor.score() - current_score
if (
picked_neighbor.x > max_x
or picked_neighbor.x < min_x
or picked_neighbor.y > max_y
or picked_neighbor.y < min_y
):
continue # neighbor outside our bounds
if not find_max:
UpperCAmelCase_ : Optional[Any] = change * -1 # in case we are finding minimum
if change > 0: # improves the solution
UpperCAmelCase_ : Optional[Any] = picked_neighbor
else:
UpperCAmelCase_ : str = (math.e) ** (
change / current_temp
) # probability generation function
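                # A worked illustration of the Metropolis criterion above
                # (numbers assumed for illustration, not from this module):
                # with change = -5 and current_temp = 100 the acceptance
                # probability is e^(-5/100) ~ 0.951, so a slightly worse
                # neighbor is usually accepted early on; at current_temp = 1
                # the same move has probability e^(-5) ~ 0.0067 and is almost
                # always rejected.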
if random.random() < probability: # random number within probability
UpperCAmelCase_ : str = picked_neighbor
UpperCAmelCase_ : List[str] = current_temp - (current_temp * rate_of_decrease)
if current_temp < threshold_temp or next_state is None:
# temperature below threshold, or could not find a suitable neighbor
UpperCAmelCase_ : Optional[int] = True
else:
UpperCAmelCase_ : List[str] = next_state
if visualization:
from matplotlib import pyplot as plt
plt.plot(range(__lowerCamelCase ), __lowerCamelCase )
plt.xlabel("Iterations" )
plt.ylabel("Function values" )
plt.show()
return best_state
if __name__ == "__main__":
def __a ( __lowerCamelCase, __lowerCamelCase ):
return (x**2) + (y**2)
# starting the problem with initial coordinates (12, 47)
_a = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
_a = simulated_annealing(
prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
'The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 '
f"""and 50 > y > - 5 found via hill climbing: {local_min.score()}"""
)
# starting the problem with initial coordinates (12, 47)
_a = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
_a = simulated_annealing(
prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
'The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 '
f"""and 50 > y > - 5 found via hill climbing: {local_min.score()}"""
)
def __a ( __lowerCamelCase, __lowerCamelCase ):
return (3 * x**2) - (6 * y)
_a = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
_a = simulated_annealing(prob, find_max=False, visualization=True)
print(
    'The minimum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: '
f"""{local_min.score()}"""
)
_a = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
_a = simulated_annealing(prob, find_max=True, visualization=True)
print(
    'The maximum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: '
f"""{local_min.score()}"""
)
| 363
|
"""simple docstring"""
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class A_ (unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = "ylacombe/bark-small"
UpperCAmelCase_ : Union[str, Any] = tempfile.mkdtemp()
UpperCAmelCase_ : List[str] = "en_speaker_1"
UpperCAmelCase_ : Tuple = "This is a test string"
UpperCAmelCase_ : List[Any] = "speaker_embeddings_path.json"
UpperCAmelCase_ : Any = "speaker_embeddings"
def UpperCamelCase__ ( self , **lowercase_ ):
"""simple docstring"""
return AutoTokenizer.from_pretrained(self.checkpoint , **lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Tuple = self.get_tokenizer()
UpperCAmelCase_ : Union[str, Any] = BarkProcessor(tokenizer=lowercase_ )
processor.save_pretrained(self.tmpdirname )
UpperCAmelCase_ : Optional[int] = BarkProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
processor.save_pretrained(
self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
UpperCAmelCase_ : Dict = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
UpperCAmelCase_ : Union[str, Any] = BarkProcessor.from_pretrained(
self.tmpdirname , self.speaker_embeddings_dict_path , bos_token="(BOS)" , eos_token="(EOS)" , )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Dict = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
UpperCAmelCase_ : int = 35
UpperCAmelCase_ : Optional[Any] = 2
UpperCAmelCase_ : List[Any] = 8
UpperCAmelCase_ : Optional[Any] = {
"semantic_prompt": np.ones(lowercase_ ),
"coarse_prompt": np.ones((nb_codebooks_coarse, seq_len) ),
"fine_prompt": np.ones((nb_codebooks_total, seq_len) ),
}
# test providing already loaded voice_preset
UpperCAmelCase_ : Dict = processor(text=self.input_string , voice_preset=lowercase_ )
UpperCAmelCase_ : List[str] = inputs["history_prompt"]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(lowercase_ , np.array([] ) ).tolist() )
# test loading voice preset from npz file
UpperCAmelCase_ : Tuple = os.path.join(self.tmpdirname , "file.npz" )
np.savez(lowercase_ , **lowercase_ )
UpperCAmelCase_ : Optional[int] = processor(text=self.input_string , voice_preset=lowercase_ )
UpperCAmelCase_ : List[str] = inputs["history_prompt"]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(lowercase_ , np.array([] ) ).tolist() )
# test loading voice preset from the hub
UpperCAmelCase_ : Tuple = processor(text=self.input_string , voice_preset=self.voice_preset )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = self.get_tokenizer()
UpperCAmelCase_ : Optional[Any] = BarkProcessor(tokenizer=lowercase_ )
UpperCAmelCase_ : Tuple = processor(text=self.input_string )
UpperCAmelCase_ : Union[str, Any] = tokenizer(
self.input_string , padding="max_length" , max_length=256 , add_special_tokens=lowercase_ , return_attention_mask=lowercase_ , return_token_type_ids=lowercase_ , )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
| 23
| 0
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
_a = None
_a = logging.get_logger(__name__)
_a = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}
_a = {
'vocab_file': {
'facebook/mbart-large-en-ro': (
'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model'
),
'facebook/mbart-large-cc25': (
'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model'
),
},
'tokenizer_file': {
'facebook/mbart-large-en-ro': 'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json',
'facebook/mbart-large-cc25': 'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json',
},
}
_a = {
'facebook/mbart-large-en-ro': 1_024,
'facebook/mbart-large-cc25': 1_024,
}
# fmt: off
_a = ['ar_AR', 'cs_CZ', 'de_DE', 'en_XX', 'es_XX', 'et_EE', 'fi_FI', 'fr_XX', 'gu_IN', 'hi_IN', 'it_IT', 'ja_XX', 'kk_KZ', 'ko_KR', 'lt_LT', 'lv_LV', 'my_MM', 'ne_NP', 'nl_XX', 'ro_RO', 'ru_RU', 'si_LK', 'tr_TR', 'vi_VN', 'zh_CN']
class A_ (lowercase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE__ : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE__ : List[str] = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE__ : List[Any] = ["""input_ids""", """attention_mask"""]
SCREAMING_SNAKE_CASE__ : Any = MBartTokenizer
SCREAMING_SNAKE_CASE__ : List[int] = []
SCREAMING_SNAKE_CASE__ : List[int] = []
def __init__( self , lowercase_=None , lowercase_=None , lowercase_="<s>" , lowercase_="</s>" , lowercase_="</s>" , lowercase_="<s>" , lowercase_="<unk>" , lowercase_="<pad>" , lowercase_="<mask>" , lowercase_=None , lowercase_=None , lowercase_=None , **lowercase_ , ):
"""simple docstring"""
UpperCAmelCase_ : int = AddedToken(lowercase_ , lstrip=lowercase_ , rstrip=lowercase_ ) if isinstance(lowercase_ , lowercase_ ) else mask_token
super().__init__(
vocab_file=lowercase_ , tokenizer_file=lowercase_ , bos_token=lowercase_ , eos_token=lowercase_ , sep_token=lowercase_ , cls_token=lowercase_ , unk_token=lowercase_ , pad_token=lowercase_ , mask_token=lowercase_ , src_lang=lowercase_ , tgt_lang=lowercase_ , additional_special_tokens=lowercase_ , **lowercase_ , )
UpperCAmelCase_ : str = vocab_file
UpperCAmelCase_ : Dict = False if not self.vocab_file else True
UpperCAmelCase_ : Any = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({"additional_special_tokens": _additional_special_tokens} )
UpperCAmelCase_ : List[str] = {
lang_code: self.convert_tokens_to_ids(lowercase_ ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
UpperCAmelCase_ : Optional[Any] = src_lang if src_lang is not None else "en_XX"
UpperCAmelCase_ : Any = self.convert_tokens_to_ids(self._src_lang )
UpperCAmelCase_ : Union[str, Any] = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return self._src_lang
@src_lang.setter
def UpperCamelCase__ ( self , lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def UpperCamelCase__ ( self , lowercase_ , lowercase_ = None ):
"""simple docstring"""
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def UpperCamelCase__ ( self , lowercase_ , lowercase_ = None ):
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = [self.sep_token_id]
UpperCAmelCase_ : Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
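        # mBART does not use token type ids, so the mask is all zeros even for
        # a pair of sequences; the method exists only for API compatibility.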
def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , **lowercase_ ):
"""simple docstring"""
if src_lang is None or tgt_lang is None:
raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" )
UpperCAmelCase_ : str = src_lang
UpperCAmelCase_ : Optional[Any] = self(lowercase_ , add_special_tokens=lowercase_ , return_tensors=lowercase_ , **lowercase_ )
UpperCAmelCase_ : Optional[int] = self.convert_tokens_to_ids(lowercase_ )
UpperCAmelCase_ : Union[str, Any] = tgt_lang_id
return inputs
def UpperCamelCase__ ( self , lowercase_ , lowercase_ = "en_XX" , lowercase_ = None , lowercase_ = "ro_RO" , **lowercase_ , ):
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = src_lang
UpperCAmelCase_ : Optional[Any] = tgt_lang
return super().prepare_seqaseq_batch(lowercase_ , lowercase_ , **lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
return self.set_src_lang_special_tokens(self.src_lang )
def UpperCamelCase__ ( self ):
"""simple docstring"""
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def UpperCamelCase__ ( self , lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : str = self.convert_tokens_to_ids(lowercase_ )
UpperCAmelCase_ : List[str] = []
UpperCAmelCase_ : Dict = [self.eos_token_id, self.cur_lang_code]
UpperCAmelCase_ : List[str] = self.convert_ids_to_tokens(self.prefix_tokens )
UpperCAmelCase_ : Optional[Any] = self.convert_ids_to_tokens(self.suffix_tokens )
UpperCAmelCase_ : Union[str, Any] = processors.TemplateProcessing(
single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
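        # With the empty prefix and `[eos, lang_code]` suffix chosen above, an
        # encoded source sequence comes out as `tokens </s> <lang_code>`
        # (e.g. `... </s> en_XX`), the mBART convention of appending the
        # language code after EOS.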
def UpperCamelCase__ ( self , lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : int = self.convert_tokens_to_ids(lowercase_ )
UpperCAmelCase_ : Union[str, Any] = []
UpperCAmelCase_ : Dict = [self.eos_token_id, self.cur_lang_code]
UpperCAmelCase_ : Tuple = self.convert_ids_to_tokens(self.prefix_tokens )
UpperCAmelCase_ : int = self.convert_ids_to_tokens(self.suffix_tokens )
UpperCAmelCase_ : str = processors.TemplateProcessing(
single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def UpperCamelCase__ ( self , lowercase_ , lowercase_ = None ):
"""simple docstring"""
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer." )
if not os.path.isdir(lowercase_ ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory.""" )
return
UpperCAmelCase_ : Optional[Any] = os.path.join(
lowercase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase_ ):
copyfile(self.vocab_file , lowercase_ )
return (out_vocab_file,)
| 364
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_a = logging.get_logger(__name__)
def __a ( __lowerCamelCase, __lowerCamelCase=False ):
UpperCAmelCase_ : Optional[int] = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"""blocks.{i}.norm1.weight""", f"""deit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((f"""blocks.{i}.norm1.bias""", f"""deit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((f"""blocks.{i}.attn.proj.weight""", f"""deit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.attn.proj.bias""", f"""deit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((f"""blocks.{i}.norm2.weight""", f"""deit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((f"""blocks.{i}.norm2.bias""", f"""deit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.weight""", f"""deit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.bias""", f"""deit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.weight""", f"""deit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.bias""", f"""deit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
("cls_token", "deit.embeddings.cls_token"),
("dist_token", "deit.embeddings.distillation_token"),
("patch_embed.proj.weight", "deit.embeddings.patch_embeddings.projection.weight"),
("patch_embed.proj.bias", "deit.embeddings.patch_embeddings.projection.bias"),
("pos_embed", "deit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
("pre_logits.fc.weight", "pooler.dense.weight"),
("pre_logits.fc.bias", "pooler.dense.bias"),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
UpperCAmelCase_ : Dict = [(pair[0], pair[1][4:]) if pair[1].startswith("deit" ) else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
("norm.weight", "deit.layernorm.weight"),
("norm.bias", "deit.layernorm.bias"),
("head.weight", "cls_classifier.weight"),
("head.bias", "cls_classifier.bias"),
("head_dist.weight", "distillation_classifier.weight"),
("head_dist.bias", "distillation_classifier.bias"),
] )
return rename_keys
def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase=False ):
for i in range(config.num_hidden_layers ):
if base_model:
UpperCAmelCase_ : int = ""
else:
UpperCAmelCase_ : Union[str, Any] = "deit."
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
UpperCAmelCase_ : Tuple = state_dict.pop(f"""blocks.{i}.attn.qkv.weight""" )
UpperCAmelCase_ : Dict = state_dict.pop(f"""blocks.{i}.attn.qkv.bias""" )
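        # In timm the attention projection is stored as a single fused matrix
        # of shape (3 * hidden_size, hidden_size); the three slices below peel
        # off the query, key and value weights (in that order), each taking
        # hidden_size rows.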
# next, add query, keys and values (in that order) to the state dict
UpperCAmelCase_ : Union[str, Any] = in_proj_weight[
: config.hidden_size, :
]
UpperCAmelCase_ : Any = in_proj_bias[: config.hidden_size]
UpperCAmelCase_ : Optional[Any] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
UpperCAmelCase_ : Dict = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
UpperCAmelCase_ : List[Any] = in_proj_weight[
-config.hidden_size :, :
]
UpperCAmelCase_ : str = in_proj_bias[-config.hidden_size :]
def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
UpperCAmelCase_ : Tuple = dct.pop(__lowerCamelCase )
UpperCAmelCase_ : Tuple = val
def __a ( ):
UpperCAmelCase_ : Union[str, Any] = "http://images.cocodataset.org/val2017/000000039769.jpg"
UpperCAmelCase_ : str = Image.open(requests.get(__lowerCamelCase, stream=__lowerCamelCase ).raw )
return im
@torch.no_grad()
def __a ( __lowerCamelCase, __lowerCamelCase ):
UpperCAmelCase_ : List[str] = DeiTConfig()
# all deit models have fine-tuned heads
UpperCAmelCase_ : Optional[int] = False
# dataset (fine-tuned on ImageNet 2012), patch_size and image_size
UpperCAmelCase_ : Tuple = 1000
UpperCAmelCase_ : str = "huggingface/label-files"
UpperCAmelCase_ : str = "imagenet-1k-id2label.json"
UpperCAmelCase_ : List[Any] = json.load(open(hf_hub_download(__lowerCamelCase, __lowerCamelCase, repo_type="dataset" ), "r" ) )
UpperCAmelCase_ : List[str] = {int(__lowerCamelCase ): v for k, v in idalabel.items()}
UpperCAmelCase_ : Any = idalabel
UpperCAmelCase_ : int = {v: k for k, v in idalabel.items()}
UpperCAmelCase_ : Any = int(deit_name[-6:-4] )
UpperCAmelCase_ : Dict = int(deit_name[-3:] )
# size of the architecture
if deit_name[9:].startswith("tiny" ):
UpperCAmelCase_ : Any = 192
UpperCAmelCase_ : Union[str, Any] = 768
UpperCAmelCase_ : Union[str, Any] = 12
UpperCAmelCase_ : int = 3
elif deit_name[9:].startswith("small" ):
UpperCAmelCase_ : List[str] = 384
UpperCAmelCase_ : List[str] = 1536
UpperCAmelCase_ : Dict = 12
UpperCAmelCase_ : Any = 6
if deit_name[9:].startswith("base" ):
pass
elif deit_name[4:].startswith("large" ):
UpperCAmelCase_ : int = 1024
UpperCAmelCase_ : List[Any] = 4096
UpperCAmelCase_ : Optional[int] = 24
UpperCAmelCase_ : int = 16
# load original model from timm
UpperCAmelCase_ : Union[str, Any] = timm.create_model(__lowerCamelCase, pretrained=__lowerCamelCase )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
UpperCAmelCase_ : Optional[Any] = timm_model.state_dict()
UpperCAmelCase_ : Tuple = create_rename_keys(__lowerCamelCase, __lowerCamelCase )
for src, dest in rename_keys:
rename_key(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase )
read_in_q_k_v(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase )
# load HuggingFace model
UpperCAmelCase_ : str = DeiTForImageClassificationWithTeacher(__lowerCamelCase ).eval()
model.load_state_dict(__lowerCamelCase )
# Check outputs on an image, prepared by DeiTImageProcessor
UpperCAmelCase_ : Union[str, Any] = int(
(256 / 224) * config.image_size ) # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
UpperCAmelCase_ : Optional[Any] = DeiTImageProcessor(size=__lowerCamelCase, crop_size=config.image_size )
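    # e.g. for config.image_size = 224 the resize target above is
    # int((256 / 224) * 224) = 256, followed by a 224x224 center crop,
    # mirroring the original DeiT evaluation transform.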
UpperCAmelCase_ : Any = image_processor(images=prepare_img(), return_tensors="pt" )
UpperCAmelCase_ : int = encoding["pixel_values"]
UpperCAmelCase_ : Optional[Any] = model(__lowerCamelCase )
UpperCAmelCase_ : Any = timm_model(__lowerCamelCase )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(__lowerCamelCase, outputs.logits, atol=1E-3 )
Path(__lowerCamelCase ).mkdir(exist_ok=__lowerCamelCase )
print(f"""Saving model {deit_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(__lowerCamelCase )
print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(__lowerCamelCase )
if __name__ == "__main__":
_a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--deit_name',
default='vit_deit_base_distilled_patch16_224',
type=str,
help='Name of the DeiT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
_a = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
| 23
| 0
|
"""simple docstring"""
import argparse
import gc
import json
import os
import shutil
import warnings
import torch
from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer
try:
from transformers import LlamaTokenizerFast
except ImportError as e:
warnings.warn(e)
warnings.warn(
        'The converted tokenizer will be the `slow` tokenizer. To use the fast tokenizer, update your `tokenizers` library and re-run the tokenizer conversion.'
)
_a = None
_a = {
'7B': 11_008,
'13B': 13_824,
'30B': 17_920,
'65B': 22_016,
'70B': 28_672,
}
_a = {
'7B': 1,
'7Bf': 1,
'13B': 2,
'13Bf': 2,
'30B': 4,
'65B': 8,
'70B': 8,
'70Bf': 8,
}
def __a ( __lowerCamelCase, __lowerCamelCase=1, __lowerCamelCase=256 ):
return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3 ) ) + multiple_of - 1) // multiple_of)
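# A quick check of the rounding formula above, using values assumed from the
# 7B row of the size table: with n = 4096, ffn_dim_multiplier = 1 and
# multiple_of = 256, int(1 * int(8 * 4096 / 3)) = 10922, and rounding up to
# the next multiple of 256 gives 43 * 256 = 11_008, matching the '7B' entry.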
def __a ( __lowerCamelCase ):
with open(__lowerCamelCase, "r" ) as f:
return json.load(__lowerCamelCase )
def __a ( __lowerCamelCase, __lowerCamelCase ):
with open(__lowerCamelCase, "w" ) as f:
json.dump(__lowerCamelCase, __lowerCamelCase )
def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase=True ):
os.makedirs(__lowerCamelCase, exist_ok=__lowerCamelCase )
UpperCAmelCase_ : Union[str, Any] = os.path.join(__lowerCamelCase, "tmp" )
os.makedirs(__lowerCamelCase, exist_ok=__lowerCamelCase )
UpperCAmelCase_ : List[Any] = read_json(os.path.join(__lowerCamelCase, "params.json" ) )
UpperCAmelCase_ : str = NUM_SHARDS[model_size]
UpperCAmelCase_ : Union[str, Any] = params["n_layers"]
UpperCAmelCase_ : Union[str, Any] = params["n_heads"]
UpperCAmelCase_ : Union[str, Any] = n_heads // num_shards
UpperCAmelCase_ : int = params["dim"]
UpperCAmelCase_ : Tuple = dim // n_heads
UpperCAmelCase_ : Any = 1_0000.0
UpperCAmelCase_ : Optional[int] = 1.0 / (base ** (torch.arange(0, __lowerCamelCase, 2 ).float() / dims_per_head))
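    # These are the standard RoPE inverse frequencies theta_i = 1 / base**(2i/d)
    # with base = 10000, assuming the arange bound here is dims_per_head as in
    # the reference implementation.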
if "n_kv_heads" in params:
UpperCAmelCase_ : List[str] = params["n_kv_heads"] # for GQA / MQA
UpperCAmelCase_ : List[Any] = n_heads_per_shard // num_key_value_heads
UpperCAmelCase_ : List[str] = dim // num_key_value_heads
else: # compatibility with other checkpoints
UpperCAmelCase_ : Dict = n_heads
UpperCAmelCase_ : List[Any] = n_heads_per_shard
UpperCAmelCase_ : Any = dim
# permute for sliced rotary
def permute(__lowerCamelCase, __lowerCamelCase=n_heads, __lowerCamelCase=dim, __lowerCamelCase=dim ):
return w.view(__lowerCamelCase, dima // n_heads // 2, 2, __lowerCamelCase ).transpose(1, 2 ).reshape(__lowerCamelCase, __lowerCamelCase )
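    # The view/transpose/reshape above converts the rotary projection from the
    # original interleaved layout (adjacent row pairs per head) to the
    # half-split layout expected by the Hugging Face LLaMA attention; it only
    # reorders rows, so no values change.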
print(f"""Fetching all parameters from the checkpoint at {input_base_path}.""" )
# Load weights
if model_size == "7B":
# Not sharded
# (The sharded implementation would also work, but this is simpler.)
UpperCAmelCase_ : Dict = torch.load(os.path.join(__lowerCamelCase, "consolidated.00.pth" ), map_location="cpu" )
else:
# Sharded
UpperCAmelCase_ : List[Any] = [
torch.load(os.path.join(__lowerCamelCase, f"""consolidated.{i:02d}.pth""" ), map_location="cpu" )
for i in range(__lowerCamelCase )
]
UpperCAmelCase_ : Optional[Any] = 0
UpperCAmelCase_ : Optional[int] = {"weight_map": {}}
for layer_i in range(__lowerCamelCase ):
UpperCAmelCase_ : List[str] = f"""pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin"""
if model_size == "7B":
# Unsharded
UpperCAmelCase_ : List[Any] = {
f"""model.layers.{layer_i}.self_attn.q_proj.weight""": permute(
loaded[f"""layers.{layer_i}.attention.wq.weight"""] ),
f"""model.layers.{layer_i}.self_attn.k_proj.weight""": permute(
loaded[f"""layers.{layer_i}.attention.wk.weight"""] ),
f"""model.layers.{layer_i}.self_attn.v_proj.weight""": loaded[f"""layers.{layer_i}.attention.wv.weight"""],
f"""model.layers.{layer_i}.self_attn.o_proj.weight""": loaded[f"""layers.{layer_i}.attention.wo.weight"""],
f"""model.layers.{layer_i}.mlp.gate_proj.weight""": loaded[f"""layers.{layer_i}.feed_forward.w1.weight"""],
f"""model.layers.{layer_i}.mlp.down_proj.weight""": loaded[f"""layers.{layer_i}.feed_forward.w2.weight"""],
f"""model.layers.{layer_i}.mlp.up_proj.weight""": loaded[f"""layers.{layer_i}.feed_forward.w3.weight"""],
f"""model.layers.{layer_i}.input_layernorm.weight""": loaded[f"""layers.{layer_i}.attention_norm.weight"""],
f"""model.layers.{layer_i}.post_attention_layernorm.weight""": loaded[f"""layers.{layer_i}.ffn_norm.weight"""],
}
else:
# Sharded
            # Note that attention.w{q,k,v,o}, feed_forward.w[1,2,3], attention_norm.weight and ffn_norm.weight share
# the same storage object, saving attention_norm and ffn_norm will save other weights too, which is
# redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned.
UpperCAmelCase_ : Union[str, Any] = {
f"""model.layers.{layer_i}.input_layernorm.weight""": loaded[0][
f"""layers.{layer_i}.attention_norm.weight"""
].clone(),
f"""model.layers.{layer_i}.post_attention_layernorm.weight""": loaded[0][
f"""layers.{layer_i}.ffn_norm.weight"""
].clone(),
}
UpperCAmelCase_ : Dict = permute(
torch.cat(
[
loaded[i][f"""layers.{layer_i}.attention.wq.weight"""].view(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase )
for i in range(__lowerCamelCase )
], dim=0, ).reshape(__lowerCamelCase, __lowerCamelCase ) )
UpperCAmelCase_ : List[str] = permute(
torch.cat(
[
loaded[i][f"""layers.{layer_i}.attention.wk.weight"""].view(
__lowerCamelCase, __lowerCamelCase, __lowerCamelCase )
for i in range(__lowerCamelCase )
], dim=0, ).reshape(__lowerCamelCase, __lowerCamelCase ), __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, )
UpperCAmelCase_ : int = torch.cat(
[
loaded[i][f"""layers.{layer_i}.attention.wv.weight"""].view(
__lowerCamelCase, __lowerCamelCase, __lowerCamelCase )
for i in range(__lowerCamelCase )
], dim=0, ).reshape(__lowerCamelCase, __lowerCamelCase )
UpperCAmelCase_ : List[str] = torch.cat(
[loaded[i][f"""layers.{layer_i}.attention.wo.weight"""] for i in range(__lowerCamelCase )], dim=1 )
UpperCAmelCase_ : str = torch.cat(
[loaded[i][f"""layers.{layer_i}.feed_forward.w1.weight"""] for i in range(__lowerCamelCase )], dim=0 )
UpperCAmelCase_ : Dict = torch.cat(
[loaded[i][f"""layers.{layer_i}.feed_forward.w2.weight"""] for i in range(__lowerCamelCase )], dim=1 )
UpperCAmelCase_ : Optional[Any] = torch.cat(
[loaded[i][f"""layers.{layer_i}.feed_forward.w3.weight"""] for i in range(__lowerCamelCase )], dim=0 )
UpperCAmelCase_ : List[Any] = inv_freq
for k, v in state_dict.items():
UpperCAmelCase_ : List[str] = filename
param_count += v.numel()
torch.save(__lowerCamelCase, os.path.join(__lowerCamelCase, __lowerCamelCase ) )
UpperCAmelCase_ : str = f"""pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin"""
if model_size == "7B":
# Unsharded
UpperCAmelCase_ : List[Any] = {
"model.embed_tokens.weight": loaded["tok_embeddings.weight"],
"model.norm.weight": loaded["norm.weight"],
"lm_head.weight": loaded["output.weight"],
}
else:
UpperCAmelCase_ : Tuple = {
"model.norm.weight": loaded[0]["norm.weight"],
"model.embed_tokens.weight": torch.cat(
[loaded[i]["tok_embeddings.weight"] for i in range(__lowerCamelCase )], dim=1 ),
"lm_head.weight": torch.cat([loaded[i]["output.weight"] for i in range(__lowerCamelCase )], dim=0 ),
}
for k, v in state_dict.items():
UpperCAmelCase_ : Dict = filename
param_count += v.numel()
torch.save(__lowerCamelCase, os.path.join(__lowerCamelCase, __lowerCamelCase ) )
# Write configs
UpperCAmelCase_ : Optional[Any] = {"total_size": param_count * 2}
write_json(__lowerCamelCase, os.path.join(__lowerCamelCase, "pytorch_model.bin.index.json" ) )
UpperCAmelCase_ : Optional[Any] = params["ffn_dim_multiplier"] if "ffn_dim_multiplier" in params else 1
UpperCAmelCase_ : List[str] = params["multiple_of"] if "multiple_of" in params else 256
UpperCAmelCase_ : str = LlamaConfig(
hidden_size=__lowerCamelCase, intermediate_size=compute_intermediate_size(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase ), num_attention_heads=params["n_heads"], num_hidden_layers=params["n_layers"], rms_norm_eps=params["norm_eps"], num_key_value_heads=__lowerCamelCase, )
config.save_pretrained(__lowerCamelCase )
# Make space so we can load the model properly now.
del state_dict
del loaded
gc.collect()
print("Loading the checkpoint in a Llama model." )
UpperCAmelCase_ : Tuple = LlamaForCausalLM.from_pretrained(__lowerCamelCase, torch_dtype=torch.floataa, low_cpu_mem_usage=__lowerCamelCase )
# Avoid saving this as part of the config.
del model.config._name_or_path
print("Saving in the Transformers format." )
model.save_pretrained(__lowerCamelCase, safe_serialization=__lowerCamelCase )
shutil.rmtree(__lowerCamelCase )
def __a ( __lowerCamelCase, __lowerCamelCase ):
# Initialize the tokenizer based on the `spm` model
UpperCAmelCase_ : Any = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast
print(f"""Saving a {tokenizer_class.__name__} to {tokenizer_path}.""" )
UpperCAmelCase_ : List[Any] = tokenizer_class(__lowerCamelCase )
tokenizer.save_pretrained(__lowerCamelCase )
def __a ( ):
UpperCAmelCase_ : Tuple = argparse.ArgumentParser()
parser.add_argument(
"--input_dir", help="Location of LLaMA weights, which contains tokenizer.model and model folders", )
parser.add_argument(
"--model_size", choices=["7B", "7Bf", "13B", "13Bf", "30B", "65B", "70B", "70Bf", "tokenizer_only"], )
parser.add_argument(
"--output_dir", help="Location to write HF model and tokenizer", )
parser.add_argument("--safe_serialization", type=__lowerCamelCase, help="Whether or not to save using `safetensors`." )
UpperCAmelCase_ : Dict = parser.parse_args()
if args.model_size != "tokenizer_only":
write_model(
model_path=args.output_dir, input_base_path=os.path.join(args.input_dir, args.model_size ), model_size=args.model_size, safe_serialization=args.safe_serialization, )
UpperCAmelCase_ : str = os.path.join(args.input_dir, "tokenizer.model" )
write_tokenizer(args.output_dir, __lowerCamelCase )
if __name__ == "__main__":
main()
| 365
|
"""simple docstring"""
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class A_ (unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase__ ( self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
# pipeline has Flax weights
UpperCAmelCase_ : Optional[Any] = FlaxDiffusionPipeline.from_pretrained(
"hf-internal-testing/tiny-stable-diffusion-pipe" , safety_checker=lowercase_ , cache_dir=lowercase_ )
UpperCAmelCase_ : List[Any] = [t[-1] for t in os.walk(os.path.join(lowercase_ , os.listdir(lowercase_ )[0] , "snapshots" ) )]
UpperCAmelCase_ : Dict = [item for sublist in all_root_files for item in sublist]
# None of the downloaded files should be a PyTorch file even if we have some here:
# https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
assert not any(f.endswith(".bin" ) for f in files )
@slow
@require_flax
class A_ (unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = FlaxStableDiffusionPipeline.from_pretrained(
"hf-internal-testing/tiny-stable-diffusion-pipe" , safety_checker=lowercase_ )
UpperCAmelCase_ : Tuple = (
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
UpperCAmelCase_ : List[Any] = jax.random.PRNGKey(0 )
UpperCAmelCase_ : List[str] = 4
UpperCAmelCase_ : Tuple = jax.device_count()
UpperCAmelCase_ : Optional[int] = num_samples * [prompt]
UpperCAmelCase_ : List[Any] = pipeline.prepare_inputs(lowercase_ )
# shard inputs and rng
UpperCAmelCase_ : int = replicate(lowercase_ )
UpperCAmelCase_ : str = jax.random.split(lowercase_ , lowercase_ )
UpperCAmelCase_ : List[str] = shard(lowercase_ )
UpperCAmelCase_ : Dict = pipeline(lowercase_ , lowercase_ , lowercase_ , lowercase_ , jit=lowercase_ ).images
assert images.shape == (num_samples, 1, 64, 64, 3)
if jax.device_count() == 8:
assert np.abs(np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 4.1_51_47_45 ) < 1E-3
assert np.abs(np.abs(lowercase_ , dtype=np.floataa ).sum() - 4_99_47.8_75 ) < 5E-1
UpperCAmelCase_ : List[Any] = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) )
assert len(lowercase_ ) == num_samples
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = FlaxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4" , revision="flax" , safety_checker=lowercase_ )
UpperCAmelCase_ : Optional[int] = (
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
UpperCAmelCase_ : str = jax.random.PRNGKey(0 )
UpperCAmelCase_ : Union[str, Any] = 50
UpperCAmelCase_ : List[str] = jax.device_count()
UpperCAmelCase_ : List[str] = num_samples * [prompt]
UpperCAmelCase_ : Union[str, Any] = pipeline.prepare_inputs(lowercase_ )
# shard inputs and rng
UpperCAmelCase_ : Any = replicate(lowercase_ )
UpperCAmelCase_ : List[str] = jax.random.split(lowercase_ , lowercase_ )
UpperCAmelCase_ : List[str] = shard(lowercase_ )
UpperCAmelCase_ : int = pipeline(lowercase_ , lowercase_ , lowercase_ , lowercase_ , jit=lowercase_ ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.05_65_24_01) ) < 1E-3
assert np.abs((np.abs(lowercase_ , dtype=np.floataa ).sum() - 2_38_38_08.2) ) < 5E-1
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ : int = FlaxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloataa , safety_checker=lowercase_ )
UpperCAmelCase_ : Any = (
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
UpperCAmelCase_ : str = jax.random.PRNGKey(0 )
UpperCAmelCase_ : str = 50
UpperCAmelCase_ : List[str] = jax.device_count()
UpperCAmelCase_ : List[Any] = num_samples * [prompt]
UpperCAmelCase_ : Any = pipeline.prepare_inputs(lowercase_ )
# shard inputs and rng
UpperCAmelCase_ : Dict = replicate(lowercase_ )
UpperCAmelCase_ : str = jax.random.split(lowercase_ , lowercase_ )
UpperCAmelCase_ : Union[str, Any] = shard(lowercase_ )
UpperCAmelCase_ : List[Any] = pipeline(lowercase_ , lowercase_ , lowercase_ , lowercase_ , jit=lowercase_ ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.04_00_39_06) ) < 1E-3
assert np.abs((np.abs(lowercase_ , dtype=np.floataa ).sum() - 2_37_35_16.75) ) < 5E-1
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ : str = FlaxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloataa )
UpperCAmelCase_ : List[Any] = (
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
UpperCAmelCase_ : Dict = jax.random.PRNGKey(0 )
UpperCAmelCase_ : Optional[int] = 50
UpperCAmelCase_ : Optional[int] = jax.device_count()
UpperCAmelCase_ : str = num_samples * [prompt]
UpperCAmelCase_ : int = pipeline.prepare_inputs(lowercase_ )
# shard inputs and rng
UpperCAmelCase_ : Union[str, Any] = replicate(lowercase_ )
UpperCAmelCase_ : Union[str, Any] = jax.random.split(lowercase_ , lowercase_ )
UpperCAmelCase_ : Optional[int] = shard(lowercase_ )
UpperCAmelCase_ : Any = pipeline(lowercase_ , lowercase_ , lowercase_ , lowercase_ , jit=lowercase_ ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.04_00_39_06) ) < 1E-3
assert np.abs((np.abs(lowercase_ , dtype=np.floataa ).sum() - 2_37_35_16.75) ) < 5E-1
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : List[Any] = FlaxDDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="scaled_linear" , set_alpha_to_one=lowercase_ , steps_offset=1 , )
UpperCAmelCase_ , UpperCAmelCase_ : int = FlaxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloataa , scheduler=lowercase_ , safety_checker=lowercase_ , )
UpperCAmelCase_ : List[Any] = scheduler.create_state()
UpperCAmelCase_ : int = scheduler_state
UpperCAmelCase_ : Union[str, Any] = (
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
UpperCAmelCase_ : Optional[Any] = jax.random.PRNGKey(0 )
UpperCAmelCase_ : int = 50
UpperCAmelCase_ : str = jax.device_count()
UpperCAmelCase_ : List[Any] = num_samples * [prompt]
UpperCAmelCase_ : int = pipeline.prepare_inputs(lowercase_ )
# shard inputs and rng
UpperCAmelCase_ : int = replicate(lowercase_ )
UpperCAmelCase_ : List[str] = jax.random.split(lowercase_ , lowercase_ )
UpperCAmelCase_ : Optional[Any] = shard(lowercase_ )
UpperCAmelCase_ : Any = pipeline(lowercase_ , lowercase_ , lowercase_ , lowercase_ , jit=lowercase_ ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0_45_04_39_45) ) < 1E-3
assert np.abs((np.abs(lowercase_ , dtype=np.floataa ).sum() - 2_34_76_93.5) ) < 5E-1
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : int = (
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
UpperCAmelCase_ : List[str] = jax.device_count()
UpperCAmelCase_ : List[Any] = num_samples * [prompt]
UpperCAmelCase_ : Union[str, Any] = jax.random.split(jax.random.PRNGKey(0 ) , lowercase_ )
UpperCAmelCase_ , UpperCAmelCase_ : str = FlaxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloataa , safety_checker=lowercase_ , )
UpperCAmelCase_ : Any = replicate(lowercase_ )
UpperCAmelCase_ : List[str] = pipeline.prepare_inputs(lowercase_ )
UpperCAmelCase_ : List[str] = shard(lowercase_ )
UpperCAmelCase_ : List[Any] = pipeline(lowercase_ , lowercase_ , lowercase_ , jit=lowercase_ ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
UpperCAmelCase_ : int = images[2, 0, 256, 10:17, 1]
# With memory efficient attention
UpperCAmelCase_ , UpperCAmelCase_ : int = FlaxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloataa , safety_checker=lowercase_ , use_memory_efficient_attention=lowercase_ , )
UpperCAmelCase_ : str = replicate(lowercase_ )
UpperCAmelCase_ : str = pipeline.prepare_inputs(lowercase_ )
UpperCAmelCase_ : Optional[int] = shard(lowercase_ )
UpperCAmelCase_ : str = pipeline(lowercase_ , lowercase_ , lowercase_ , jit=lowercase_ ).images
assert images_eff.shape == (num_samples, 1, 512, 512, 3)
UpperCAmelCase_ : Optional[int] = images[2, 0, 256, 10:17, 1]
# I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
# over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
assert abs(slice_eff - slice ).max() < 1E-2
| 23
| 0
|
"""simple docstring"""
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
# Initialise PyTorch model.
# If you want to convert a checkpoint that uses absolute position embeddings, make sure to set reset_position_index_per_cell of
# TapasConfig to False.
# initialize configuration from json file
UpperCAmelCase_ : Tuple = TapasConfig.from_json_file(__lowerCamelCase )
# set absolute/relative position embeddings parameter
UpperCAmelCase_ : Union[str, Any] = reset_position_index_per_cell
# set remaining parameters of TapasConfig as well as the model based on the task
if task == "SQA":
UpperCAmelCase_ : str = TapasForQuestionAnswering(config=__lowerCamelCase )
elif task == "WTQ":
# run_task_main.py hparams
UpperCAmelCase_ : List[Any] = 4
UpperCAmelCase_ : Dict = True
# hparam_utils.py hparams
UpperCAmelCase_ : Optional[int] = 0.66_4694
UpperCAmelCase_ : List[str] = 0.20_7951
UpperCAmelCase_ : Union[str, Any] = 0.12_1194
UpperCAmelCase_ : Dict = True
UpperCAmelCase_ : Union[str, Any] = True
UpperCAmelCase_ : List[Any] = False
UpperCAmelCase_ : str = 0.035_2513
UpperCAmelCase_ : List[Any] = TapasForQuestionAnswering(config=__lowerCamelCase )
elif task == "WIKISQL_SUPERVISED":
# run_task_main.py hparams
UpperCAmelCase_ : Dict = 4
UpperCAmelCase_ : Union[str, Any] = False
# hparam_utils.py hparams
UpperCAmelCase_ : Optional[int] = 36.4519
UpperCAmelCase_ : List[Any] = 0.90_3421
UpperCAmelCase_ : Union[str, Any] = 222.088
UpperCAmelCase_ : Tuple = True
UpperCAmelCase_ : Any = True
UpperCAmelCase_ : Dict = True
UpperCAmelCase_ : str = 0.76_3141
UpperCAmelCase_ : Dict = TapasForQuestionAnswering(config=__lowerCamelCase )
elif task == "TABFACT":
UpperCAmelCase_ : List[str] = TapasForSequenceClassification(config=__lowerCamelCase )
elif task == "MLM":
UpperCAmelCase_ : Optional[int] = TapasForMaskedLM(config=__lowerCamelCase )
elif task == "INTERMEDIATE_PRETRAINING":
UpperCAmelCase_ : Any = TapasModel(config=__lowerCamelCase )
else:
raise ValueError(f"""Task {task} not supported.""" )
print(f"""Building PyTorch model from configuration: {config}""" )
# Load weights from tf checkpoint
load_tf_weights_in_tapas(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase )
# Save pytorch-model (weights and configuration)
print(f"""Save PyTorch model to {pytorch_dump_path}""" )
model.save_pretrained(__lowerCamelCase )
# Save tokenizer files
print(f"""Save tokenizer files to {pytorch_dump_path}""" )
UpperCAmelCase_ : Dict = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + "vocab.txt", model_max_length=512 )
tokenizer.save_pretrained(__lowerCamelCase )
print("Used relative position embeddings:", model.config.reset_position_index_per_cell )
if __name__ == "__main__":
_a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--task', default='SQA', type=str, help='Model task for which to convert a checkpoint. Defaults to SQA.'
)
parser.add_argument(
'--reset_position_index_per_cell',
default=False,
action='store_true',
        help='Whether to use relative position embeddings or not. Defaults to False.',
)
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--tapas_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained TAPAS model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
_a = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
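    # Example invocation (the script and file names below are illustrative only):
    #   python convert_tapas_checkpoint.py \
    #       --task WTQ \
    #       --reset_position_index_per_cell \
    #       --tf_checkpoint_path /path/to/model.ckpt \
    #       --tapas_config_file /path/to/tapas_config.json \
    #       --pytorch_dump_path /path/to/pytorch_model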
| 366
|
"""simple docstring"""
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
_a = 0
_a = [
[0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0], # 0s are free cells whereas 1s are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
_a = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
_a = tuple[int, int]
class A_ :
'''simple docstring'''
def __init__( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ):
"""simple docstring"""
UpperCAmelCase_ : int = pos_x
UpperCAmelCase_ : List[Any] = pos_y
UpperCAmelCase_ : Union[str, Any] = (pos_y, pos_x)
UpperCAmelCase_ : Any = goal_x
UpperCAmelCase_ : Dict = goal_y
UpperCAmelCase_ : Any = g_cost
UpperCAmelCase_ : List[str] = parent
UpperCAmelCase_ : int = self.calculate_heuristic()
UpperCAmelCase_ : Any = self.g_cost + self.h_cost
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Tuple = self.pos_x - self.goal_x
UpperCAmelCase_ : Union[str, Any] = self.pos_y - self.goal_y
if HEURISTIC == 1:
return abs(lowercase_ ) + abs(lowercase_ )
else:
return sqrt(dy**2 + dx**2 )
def __lt__( self , lowercase_ ):
"""simple docstring"""
return self.f_cost < other.f_cost
class A_ :
'''simple docstring'''
def __init__( self , lowercase_ , lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : Tuple = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , lowercase_ )
UpperCAmelCase_ : List[Any] = Node(goal[1] , goal[0] , goal[1] , goal[0] , 9_9999 , lowercase_ )
UpperCAmelCase_ : str = [self.start]
UpperCAmelCase_ : list[Node] = []
UpperCAmelCase_ : int = False
def UpperCamelCase__ ( self ):
"""simple docstring"""
while self.open_nodes:
# Open Nodes are sorted using __lt__
self.open_nodes.sort()
UpperCAmelCase_ : List[str] = self.open_nodes.pop(0 )
if current_node.pos == self.target.pos:
return self.retrace_path(lowercase_ )
self.closed_nodes.append(lowercase_ )
UpperCAmelCase_ : str = self.get_successors(lowercase_ )
for child_node in successors:
if child_node in self.closed_nodes:
continue
if child_node not in self.open_nodes:
self.open_nodes.append(lowercase_ )
else:
# retrieve the best current path
UpperCAmelCase_ : Union[str, Any] = self.open_nodes.pop(self.open_nodes.index(lowercase_ ) )
if child_node.g_cost < better_node.g_cost:
self.open_nodes.append(lowercase_ )
else:
self.open_nodes.append(lowercase_ )
return [self.start.pos]
def UpperCamelCase__ ( self , lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : Any = []
for action in delta:
UpperCAmelCase_ : str = parent.pos_x + action[1]
UpperCAmelCase_ : int = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(lowercase_ ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(
lowercase_ , lowercase_ , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , lowercase_ , ) )
return successors
def UpperCamelCase__ ( self , lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = node
UpperCAmelCase_ : int = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
UpperCAmelCase_ : Optional[int] = current_node.parent
path.reverse()
return path
class A_ :
'''simple docstring'''
def __init__( self , lowercase_ , lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : Dict = AStar(lowercase_ , lowercase_ )
UpperCAmelCase_ : Optional[Any] = AStar(lowercase_ , lowercase_ )
UpperCAmelCase_ : Tuple = False
def UpperCamelCase__ ( self ):
"""simple docstring"""
while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
self.fwd_astar.open_nodes.sort()
self.bwd_astar.open_nodes.sort()
UpperCAmelCase_ : List[str] = self.fwd_astar.open_nodes.pop(0 )
UpperCAmelCase_ : List[Any] = self.bwd_astar.open_nodes.pop(0 )
if current_bwd_node.pos == current_fwd_node.pos:
return self.retrace_bidirectional_path(
lowercase_ , lowercase_ )
self.fwd_astar.closed_nodes.append(lowercase_ )
self.bwd_astar.closed_nodes.append(lowercase_ )
UpperCAmelCase_ : Tuple = current_bwd_node
UpperCAmelCase_ : str = current_fwd_node
UpperCAmelCase_ : Dict = {
self.fwd_astar: self.fwd_astar.get_successors(lowercase_ ),
self.bwd_astar: self.bwd_astar.get_successors(lowercase_ ),
}
for astar in [self.fwd_astar, self.bwd_astar]:
for child_node in successors[astar]:
if child_node in astar.closed_nodes:
continue
if child_node not in astar.open_nodes:
astar.open_nodes.append(lowercase_ )
else:
# retrieve the best current path
UpperCAmelCase_ : List[Any] = astar.open_nodes.pop(
astar.open_nodes.index(lowercase_ ) )
if child_node.g_cost < better_node.g_cost:
astar.open_nodes.append(lowercase_ )
else:
astar.open_nodes.append(lowercase_ )
return [self.fwd_astar.start.pos]
def UpperCamelCase__ ( self , lowercase_ , lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : List[Any] = self.fwd_astar.retrace_path(lowercase_ )
UpperCAmelCase_ : int = self.bwd_astar.retrace_path(lowercase_ )
bwd_path.pop()
bwd_path.reverse()
UpperCAmelCase_ : Any = fwd_path + bwd_path
return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
_a = (0, 0)
_a = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
_a = time.time()
_a = AStar(init, goal)
_a = a_star.search()
_a = time.time() - start_time
print(f"""AStar execution time = {end_time:f} seconds""")
_a = time.time()
    _a = BidirectionalAStar(init, goal)
    _a = bidir_astar.search()
    _a = time.time() - bd_start_time
print(f"""BidirectionalAStar execution time = {bd_end_time:f} seconds""")
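# Standalone illustration of the two heuristics selected by HEURISTIC above
# (self-contained; the sample values are illustrative):
from math import sqrt

def _manhattan(dx: int, dy: int) -> int:
    return abs(dx) + abs(dy)

def _euclidean(dx: int, dy: int) -> float:
    return sqrt(dx * dx + dy * dy)

# For dx = 3, dy = 4: _manhattan -> 7 and _euclidean -> 5.0. Neither
# overestimates the true cost of 4-directional moves, so both are admissible.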
| 23
| 0
|
"""simple docstring"""
import os
import sys
import transformers
_a = '3'
print('Python version:', sys.version)
print('transformers version:', transformers.__version__)
try:
import torch
print('Torch version:', torch.__version__)
print('Cuda available:', torch.cuda.is_available())
print('Cuda version:', torch.version.cuda)
print('CuDNN version:', torch.backends.cudnn.version())
print('Number of GPUs available:', torch.cuda.device_count())
print('NCCL version:', torch.cuda.nccl.version())
except ImportError:
print('Torch version:', None)
try:
import deepspeed
print('DeepSpeed version:', deepspeed.__version__)
except ImportError:
print('DeepSpeed version:', None)
try:
import tensorflow as tf
print('TensorFlow version:', tf.__version__)
print('TF GPUs available:', bool(tf.config.list_physical_devices('GPU')))
print('Number of TF GPUs available:', len(tf.config.list_physical_devices('GPU')))
except ImportError:
print('TensorFlow version:', None)
| 367
|
"""simple docstring"""
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class A_ (lowercase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = (PNDMScheduler,)
SCREAMING_SNAKE_CASE__ : str = (("""num_inference_steps""", 50),)
def UpperCamelCase__ ( self , **lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : int = {
"num_train_timesteps": 1000,
"beta_start": 0.00_01,
"beta_end": 0.02,
"beta_schedule": "linear",
}
config.update(**lowercase_ )
return config
def UpperCamelCase__ ( self , lowercase_=0 , **lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : str = dict(self.forward_default_kwargs )
UpperCAmelCase_ : List[str] = kwargs.pop("num_inference_steps" , lowercase_ )
UpperCAmelCase_ : Union[str, Any] = self.dummy_sample
UpperCAmelCase_ : Dict = 0.1 * sample
UpperCAmelCase_ : Dict = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
UpperCAmelCase_ : List[Any] = self.get_scheduler_config(**lowercase_ )
UpperCAmelCase_ : Dict = scheduler_class(**lowercase_ )
scheduler.set_timesteps(lowercase_ )
# copy over dummy past residuals
UpperCAmelCase_ : List[Any] = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowercase_ )
UpperCAmelCase_ : Optional[int] = scheduler_class.from_pretrained(lowercase_ )
new_scheduler.set_timesteps(lowercase_ )
# copy over dummy past residuals
UpperCAmelCase_ : int = dummy_past_residuals[:]
UpperCAmelCase_ : List[str] = scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
UpperCAmelCase_ : str = new_scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
UpperCAmelCase_ : Optional[int] = scheduler.step_plms(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
UpperCAmelCase_ : Dict = new_scheduler.step_plms(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def UpperCamelCase__ ( self ):
"""simple docstring"""
pass
def UpperCamelCase__ ( self , lowercase_=0 , **lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : List[Any] = dict(self.forward_default_kwargs )
UpperCAmelCase_ : str = kwargs.pop("num_inference_steps" , lowercase_ )
UpperCAmelCase_ : Optional[int] = self.dummy_sample
UpperCAmelCase_ : List[str] = 0.1 * sample
UpperCAmelCase_ : Tuple = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
UpperCAmelCase_ : str = self.get_scheduler_config()
UpperCAmelCase_ : Dict = scheduler_class(**lowercase_ )
scheduler.set_timesteps(lowercase_ )
# copy over dummy past residuals (must be after setting timesteps)
UpperCAmelCase_ : List[Any] = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowercase_ )
UpperCAmelCase_ : Dict = scheduler_class.from_pretrained(lowercase_ )
# copy over dummy past residuals
new_scheduler.set_timesteps(lowercase_ )
            # copy over dummy past residuals (must be after setting timesteps)
UpperCAmelCase_ : Optional[Any] = dummy_past_residuals[:]
UpperCAmelCase_ : Union[str, Any] = scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
UpperCAmelCase_ : Dict = new_scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
UpperCAmelCase_ : List[str] = scheduler.step_plms(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
UpperCAmelCase_ : int = new_scheduler.step_plms(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def UpperCamelCase__ ( self , **lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : str = self.scheduler_classes[0]
UpperCAmelCase_ : Union[str, Any] = self.get_scheduler_config(**lowercase_ )
UpperCAmelCase_ : List[Any] = scheduler_class(**lowercase_ )
UpperCAmelCase_ : Tuple = 10
UpperCAmelCase_ : List[str] = self.dummy_model()
UpperCAmelCase_ : str = self.dummy_sample_deter
scheduler.set_timesteps(lowercase_ )
for i, t in enumerate(scheduler.prk_timesteps ):
UpperCAmelCase_ : Tuple = model(lowercase_ , lowercase_ )
UpperCAmelCase_ : Optional[int] = scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ ).prev_sample
for i, t in enumerate(scheduler.plms_timesteps ):
UpperCAmelCase_ : Any = model(lowercase_ , lowercase_ )
UpperCAmelCase_ : Optional[Any] = scheduler.step_plms(lowercase_ , lowercase_ , lowercase_ ).prev_sample
return sample
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : str = dict(self.forward_default_kwargs )
UpperCAmelCase_ : Optional[Any] = kwargs.pop("num_inference_steps" , lowercase_ )
for scheduler_class in self.scheduler_classes:
UpperCAmelCase_ : Any = self.get_scheduler_config()
UpperCAmelCase_ : Tuple = scheduler_class(**lowercase_ )
UpperCAmelCase_ : str = self.dummy_sample
UpperCAmelCase_ : List[Any] = 0.1 * sample
if num_inference_steps is not None and hasattr(lowercase_ , "set_timesteps" ):
scheduler.set_timesteps(lowercase_ )
elif num_inference_steps is not None and not hasattr(lowercase_ , "set_timesteps" ):
UpperCAmelCase_ : List[Any] = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
UpperCAmelCase_ : List[str] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
UpperCAmelCase_ : List[str] = dummy_past_residuals[:]
UpperCAmelCase_ : str = scheduler.step_prk(lowercase_ , 0 , lowercase_ , **lowercase_ ).prev_sample
UpperCAmelCase_ : Any = scheduler.step_prk(lowercase_ , 1 , lowercase_ , **lowercase_ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
UpperCAmelCase_ : Optional[Any] = scheduler.step_plms(lowercase_ , 0 , lowercase_ , **lowercase_ ).prev_sample
UpperCAmelCase_ : Optional[Any] = scheduler.step_plms(lowercase_ , 1 , lowercase_ , **lowercase_ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def UpperCamelCase__ ( self ):
"""simple docstring"""
for timesteps in [100, 1000]:
self.check_over_configs(num_train_timesteps=lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=lowercase_ )
UpperCAmelCase_ : Optional[int] = self.scheduler_classes[0]
UpperCAmelCase_ : int = self.get_scheduler_config(steps_offset=1 )
UpperCAmelCase_ : Optional[Any] = scheduler_class(**lowercase_ )
scheduler.set_timesteps(10 )
assert torch.equal(
scheduler.timesteps , torch.LongTensor(
[901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1] ) , )
def UpperCamelCase__ ( self ):
"""simple docstring"""
for beta_start, beta_end in zip([0.00_01, 0.0_01] , [0.0_02, 0.02] ):
self.check_over_configs(beta_start=lowercase_ , beta_end=lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
for t in [1, 5, 10]:
self.check_over_forward(time_step=lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ):
self.check_over_forward(num_inference_steps=lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
        # earlier versions of set_timesteps() raised an indexing error on alphas when the number of inference steps was a power of 3
UpperCAmelCase_ : List[Any] = 27
for scheduler_class in self.scheduler_classes:
UpperCAmelCase_ : List[Any] = self.dummy_sample
UpperCAmelCase_ : Optional[int] = 0.1 * sample
UpperCAmelCase_ : List[str] = self.get_scheduler_config()
UpperCAmelCase_ : List[str] = scheduler_class(**lowercase_ )
scheduler.set_timesteps(lowercase_ )
# before power of 3 fix, would error on first step, so we only need to do two
for i, t in enumerate(scheduler.prk_timesteps[:2] ):
UpperCAmelCase_ : List[str] = scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ ).prev_sample
def UpperCamelCase__ ( self ):
"""simple docstring"""
with self.assertRaises(lowercase_ ):
UpperCAmelCase_ : List[str] = self.scheduler_classes[0]
UpperCAmelCase_ : str = self.get_scheduler_config()
UpperCAmelCase_ : Tuple = scheduler_class(**lowercase_ )
scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample ).prev_sample
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : List[Any] = self.full_loop()
UpperCAmelCase_ : Any = torch.sum(torch.abs(lowercase_ ) )
UpperCAmelCase_ : Dict = torch.mean(torch.abs(lowercase_ ) )
assert abs(result_sum.item() - 1_98.13_18 ) < 1E-2
assert abs(result_mean.item() - 0.25_80 ) < 1E-3
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = self.full_loop(prediction_type="v_prediction" )
UpperCAmelCase_ : str = torch.sum(torch.abs(lowercase_ ) )
UpperCAmelCase_ : Tuple = torch.mean(torch.abs(lowercase_ ) )
assert abs(result_sum.item() - 67.39_86 ) < 1E-2
assert abs(result_mean.item() - 0.08_78 ) < 1E-3
def UpperCamelCase__ ( self ):
"""simple docstring"""
# We specify different beta, so that the first alpha is 0.99
UpperCAmelCase_ : Union[str, Any] = self.full_loop(set_alpha_to_one=lowercase_ , beta_start=0.01 )
UpperCAmelCase_ : List[Any] = torch.sum(torch.abs(lowercase_ ) )
UpperCAmelCase_ : int = torch.mean(torch.abs(lowercase_ ) )
assert abs(result_sum.item() - 2_30.03_99 ) < 1E-2
assert abs(result_mean.item() - 0.29_95 ) < 1E-3
def UpperCamelCase__ ( self ):
"""simple docstring"""
# We specify different beta, so that the first alpha is 0.99
UpperCAmelCase_ : Tuple = self.full_loop(set_alpha_to_one=lowercase_ , beta_start=0.01 )
UpperCAmelCase_ : int = torch.sum(torch.abs(lowercase_ ) )
UpperCAmelCase_ : Tuple = torch.mean(torch.abs(lowercase_ ) )
assert abs(result_sum.item() - 1_86.94_82 ) < 1E-2
assert abs(result_mean.item() - 0.24_34 ) < 1E-3
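# Minimal standalone sketch of the save/reload round-trip the tests above rely
# on (assumes diffusers is installed; the config values are illustrative):
import tempfile
from diffusers import PNDMScheduler

def _config_roundtrip_demo() -> bool:
    scheduler = PNDMScheduler(num_train_timesteps=1000, beta_start=1e-4, beta_end=0.02)
    with tempfile.TemporaryDirectory() as tmpdirname:
        scheduler.save_config(tmpdirname)
        reloaded = PNDMScheduler.from_pretrained(tmpdirname)
    return reloaded.config == scheduler.config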
| 23
| 0
|
from __future__ import annotations
from typing import Any
class A_ :
'''simple docstring'''
def __init__( self , lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : int = num_of_nodes
UpperCAmelCase_ : list[list[int]] = []
UpperCAmelCase_ : dict[int, int] = {}
def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ ):
"""simple docstring"""
self.m_edges.append([u_node, v_node, weight] )
def UpperCamelCase__ ( self , lowercase_ ):
"""simple docstring"""
if self.m_component[u_node] == u_node:
return u_node
return self.find_component(self.m_component[u_node] )
def UpperCamelCase__ ( self , lowercase_ ):
"""simple docstring"""
if self.m_component[u_node] != u_node:
for k in self.m_component:
UpperCAmelCase_ : Optional[int] = self.find_component(lowercase_ )
def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ ):
"""simple docstring"""
if component_size[u_node] <= component_size[v_node]:
UpperCAmelCase_ : Optional[Any] = v_node
component_size[v_node] += component_size[u_node]
self.set_component(lowercase_ )
elif component_size[u_node] >= component_size[v_node]:
UpperCAmelCase_ : Optional[Any] = self.find_component(lowercase_ )
component_size[u_node] += component_size[v_node]
self.set_component(lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : List[Any] = []
UpperCAmelCase_ : List[str] = 0
UpperCAmelCase_ : list[Any] = [-1] * self.m_num_of_nodes
# A list of components (initialized to all of the nodes)
for node in range(self.m_num_of_nodes ):
self.m_component.update({node: node} )
component_size.append(1 )
UpperCAmelCase_ : int = self.m_num_of_nodes
while num_of_components > 1:
for edge in self.m_edges:
UpperCAmelCase_ : str = edge
UpperCAmelCase_ : Dict = self.m_component[u]
UpperCAmelCase_ : str = self.m_component[v]
if u_component != v_component:
for component in (u_component, v_component):
if (
minimum_weight_edge[component] == -1
or minimum_weight_edge[component][2] > w
):
UpperCAmelCase_ : List[Any] = [u, v, w]
for edge in minimum_weight_edge:
if isinstance(lowercase_ , lowercase_ ):
UpperCAmelCase_ : Tuple = edge
UpperCAmelCase_ : List[Any] = self.m_component[u]
UpperCAmelCase_ : Union[str, Any] = self.m_component[v]
if u_component != v_component:
mst_weight += w
self.union(lowercase_ , lowercase_ , lowercase_ )
print(F"""Added edge [{u} - {v}]\nAdded weight: {w}\n""" )
num_of_components -= 1
UpperCAmelCase_ : List[Any] = [-1] * self.m_num_of_nodes
print(F"""The total weight of the minimal spanning tree is: {mst_weight}""" )
def __a ( ):
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
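# Compact standalone sketch of the same Boruvka minimum-spanning-tree idea
# (self-contained; the sample graph is illustrative and assumed connected):
def _boruvka_mst_weight(num_nodes, edges):
    parent = list(range(num_nodes))

    def find(x):
        # path-halving union-find lookup
        while parent[x] != x:
            parent[x] = parent[parent[x]]
            x = parent[x]
        return x

    total, components = 0, num_nodes
    while components > 1:
        # cheapest outgoing edge per component in this round
        cheapest = [None] * num_nodes
        for u, v, w in edges:
            root_u, root_v = find(u), find(v)
            if root_u == root_v:
                continue
            for root in (root_u, root_v):
                if cheapest[root] is None or cheapest[root][2] > w:
                    cheapest[root] = (u, v, w)
        for edge in cheapest:
            if edge is None:
                continue
            u, v, w = edge
            root_u, root_v = find(u), find(v)
            if root_u != root_v:
                parent[root_u] = root_v
                total += w
                components -= 1
    return total

# _boruvka_mst_weight(3, [(0, 1, 1), (1, 2, 2), (0, 2, 3)]) -> 3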
| 368
|
"""simple docstring"""
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
_a = object()
# For specifying empty leaf dict `{}`
_a = object()
def _match( __lowerCamelCase, __lowerCamelCase ):
UpperCAmelCase_ : Any = tuple((re.compile(x + "$" ) for x in qs) )
for i in range(len(__lowerCamelCase ) - len(__lowerCamelCase ) + 1 ):
UpperCAmelCase_ : List[str] = [x.match(__lowerCamelCase ) for x, y in zip(__lowerCamelCase, ks[i:] )]
if matches and all(__lowerCamelCase ):
return True
return False
def _replacement_rules( __lowerCamelCase ):
def replace(__lowerCamelCase, __lowerCamelCase ):
for rule, replacement in rules:
if _match(__lowerCamelCase, __lowerCamelCase ):
return replacement
return val
return replace
def _get_partition_rules( ):
return [
# embeddings
(("transformer", "wpe", "embedding"), P("mp", __lowerCamelCase )),
(("transformer", "wte", "embedding"), P("mp", __lowerCamelCase )),
        # attention
(("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(__lowerCamelCase, "mp" )),
(("attention", "out_proj", "kernel"), P("mp", __lowerCamelCase )),
(("attention", "out_proj", "bias"), None),
# mlp
(("mlp", "c_fc", "kernel"), P(__lowerCamelCase, "mp" )),
(("mlp", "c_fc", "bias"), P("mp" )),
(("mlp", "c_proj", "kernel"), P("mp", __lowerCamelCase )),
(("mlp", "c_proj", "bias"), None),
# layer norms
((r"ln_\d+", "bias"), None),
((r"\d+", r"ln_\d+", "scale"), None),
(("ln_f", "bias"), None),
(("ln_f", "scale"), None),
]
def set_partitions( __lowerCamelCase ):
UpperCAmelCase_ : List[str] = _get_partition_rules()
UpperCAmelCase_ : Any = _replacement_rules(__lowerCamelCase )
UpperCAmelCase_ : Any = {k: _unmatched for k in flatten_dict(__lowerCamelCase )}
UpperCAmelCase_ : Dict = {k: replace(__lowerCamelCase, __lowerCamelCase ) for k, v in initd.items()}
assert _unmatched not in result.values(), "Incomplete partition spec."
return freeze(unflatten_dict(__lowerCamelCase ) )
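# Standalone sketch of the windowed regex matching the rules above depend on
# (self-contained; the example key is illustrative):
import re

def _rule_matches(patterns: tuple, key: tuple) -> bool:
    compiled = tuple(re.compile(p + "$") for p in patterns)
    # slide the pattern window over the key and accept any full match
    for i in range(len(key) - len(compiled) + 1):
        if all(c.match(k) for c, k in zip(compiled, key[i:])):
            return True
    return False

# _rule_matches(("attention", r"(q_proj|k_proj|v_proj)", "kernel"),
#               ("transformer", "h", "0", "attention", "q_proj", "kernel"))  # -> True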
| 23
| 0
|
"""simple docstring"""
def gray_code( bit_count ) -> list:
    # bit_count represents the number of bits in the gray code
    if bit_count < 0:
        raise ValueError("The given input must be non-negative" )
    # get the generated string sequence
    sequence = gray_code_sequence_string(bit_count )
    # convert the bit strings to integers
    for i in range(len(sequence ) ):
        sequence[i] = int(sequence[i], 2 )
    return sequence
def gray_code_sequence_string( bit_count ) -> list:
    # The approach is recursive.
    # Base cases are reached when bit_count = 0 or bit_count = 1.
    if bit_count == 0:
        return ["0"]
    if bit_count == 1:
        return ["0", "1"]
    seq_len = 1 << bit_count  # length of the sequence; 1 << n is equivalent to 2^n
    # the recursive call generates the sequence for bit_count - 1 bits
    smaller_sequence = gray_code_sequence_string(bit_count - 1 )
    sequence = []
    # prepend 0 to the first half, taking the smaller sequence in order
    for i in range(seq_len // 2 ):
        sequence.append("0" + smaller_sequence[i] )
    # prepend 1 to the second half, walking the smaller sequence in reverse
    for i in reversed(range(seq_len // 2 ) ):
        sequence.append("1" + smaller_sequence[i] )
    return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
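# A compact standalone cross-check (illustrative helper, not part of the
# recursive construction above): the n-bit reflected Gray code can also be
# generated directly as i ^ (i >> 1).
def _gray_code_direct(bit_count: int) -> list:
    return [i ^ (i >> 1) for i in range(1 << bit_count)]

# _gray_code_direct(2) -> [0, 1, 3, 2], i.e. 00 -> 01 -> 11 -> 10.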
| 369
|
"""simple docstring"""
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
_a = logging.getLogger()
@unittest.skip("""Temporarily disable the doc tests.""" )
@require_torch
@require_tf
@slow
class A_ (unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase__ ( self , lowercase_ , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = True , ):
"""simple docstring"""
UpperCAmelCase_ : List[str] = [file for file in os.listdir(lowercase_ ) if os.path.isfile(os.path.join(lowercase_ , lowercase_ ) )]
if identifier is not None:
UpperCAmelCase_ : Dict = [file for file in files if identifier in file]
if n_identifier is not None:
if isinstance(lowercase_ , lowercase_ ):
for n_ in n_identifier:
UpperCAmelCase_ : str = [file for file in files if n_ not in file]
else:
UpperCAmelCase_ : Any = [file for file in files if n_identifier not in file]
UpperCAmelCase_ : Union[str, Any] = ignore_files or []
ignore_files.append("__init__.py" )
UpperCAmelCase_ : Optional[int] = [file for file in files if file not in ignore_files]
for file in files:
# Open all files
print("Testing" , lowercase_ )
if only_modules:
UpperCAmelCase_ : str = file.split("." )[0]
try:
UpperCAmelCase_ : str = getattr(lowercase_ , lowercase_ )
UpperCAmelCase_ : Tuple = doctest.DocTestSuite(lowercase_ )
UpperCAmelCase_ : int = unittest.TextTestRunner().run(lowercase_ )
self.assertIs(len(result.failures ) , 0 )
except AttributeError:
logger.info(F"""{module_identifier} is not a module.""" )
else:
UpperCAmelCase_ : Optional[Any] = doctest.testfile(str(".." / directory / file ) , optionflags=doctest.ELLIPSIS )
self.assertIs(result.failed , 0 )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : int = Path("src/transformers" )
UpperCAmelCase_ : str = "modeling"
UpperCAmelCase_ : Optional[Any] = [
"modeling_ctrl.py",
"modeling_tf_ctrl.py",
]
self.analyze_directory(lowercase_ , identifier=lowercase_ , ignore_files=lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : List[Any] = Path("src/transformers" )
UpperCAmelCase_ : Any = "tokenization"
self.analyze_directory(lowercase_ , identifier=lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = Path("src/transformers" )
UpperCAmelCase_ : List[Any] = "configuration"
self.analyze_directory(lowercase_ , identifier=lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = Path("src/transformers" )
UpperCAmelCase_ : List[Any] = ["configuration", "modeling", "tokenization"]
self.analyze_directory(lowercase_ , n_identifier=lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Dict = Path("docs/source" )
UpperCAmelCase_ : Union[str, Any] = ["favicon.ico"]
self.analyze_directory(lowercase_ , ignore_files=lowercase_ , only_modules=lowercase_ )
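# Minimal standalone sketch of programmatically running a module's doctests,
# mirroring the only_modules branch above (the target module is illustrative):
import doctest
import unittest

def _run_module_doctests(module) -> bool:
    # DocTestSuite raises ValueError if the module has no docstring tests
    suite = doctest.DocTestSuite(module)
    result = unittest.TextTestRunner().run(suite)
    return len(result.failures) == 0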
| 23
| 0
|
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
_a = logging.get_logger(__name__)
class A_ (lowercase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[int] = ["""pixel_values"""]
def __init__( self , lowercase_ = True , lowercase_ = None , lowercase_ = PIL.Image.BICUBIC , lowercase_ = True , lowercase_ = None , lowercase_ = 1 / 255 , lowercase_ = True , lowercase_ = True , lowercase_ = None , lowercase_ = None , **lowercase_ , ):
"""simple docstring"""
super().__init__(**lowercase_ )
UpperCAmelCase_ : List[Any] = size if size is not None else {"height": 256, "width": 256}
UpperCAmelCase_ : Dict = get_size_dict(lowercase_ )
UpperCAmelCase_ : Optional[Any] = crop_size if crop_size is not None else {"height": 224, "width": 224}
UpperCAmelCase_ : List[str] = get_size_dict(lowercase_ , param_name="crop_size" )
UpperCAmelCase_ : Dict = do_resize
UpperCAmelCase_ : int = size
UpperCAmelCase_ : str = resample
UpperCAmelCase_ : str = do_center_crop
UpperCAmelCase_ : Dict = crop_size
UpperCAmelCase_ : Dict = do_rescale
UpperCAmelCase_ : str = rescale_factor
UpperCAmelCase_ : List[Any] = do_normalize
UpperCAmelCase_ : Optional[int] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
UpperCAmelCase_ : str = image_std if image_std is not None else IMAGENET_STANDARD_STD
def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ = PIL.Image.BICUBIC , lowercase_ = None , **lowercase_ , ):
"""simple docstring"""
UpperCAmelCase_ : List[Any] = get_size_dict(lowercase_ )
if "height" not in size or "width" not in size:
raise ValueError(F"""The size dictionary must have keys 'height' and 'width'. Got {size.keys()}""" )
return resize(
lowercase_ , size=(size["height"], size["width"]) , resample=lowercase_ , data_format=lowercase_ , **lowercase_ )
def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ = None , **lowercase_ , ):
"""simple docstring"""
UpperCAmelCase_ : Tuple = get_size_dict(lowercase_ )
if "height" not in size or "width" not in size:
raise ValueError(F"""The size dictionary must have keys 'height' and 'width'. Got {size.keys()}""" )
return center_crop(lowercase_ , size=(size["height"], size["width"]) , data_format=lowercase_ , **lowercase_ )
def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ = None , **lowercase_ , ):
"""simple docstring"""
return rescale(lowercase_ , scale=lowercase_ , data_format=lowercase_ , **lowercase_ )
def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ = None , **lowercase_ , ):
"""simple docstring"""
return normalize(lowercase_ , mean=lowercase_ , std=lowercase_ , data_format=lowercase_ , **lowercase_ )
def UpperCamelCase__ ( self , lowercase_ , lowercase_ = None , lowercase_ = None , lowercase_=None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = ChannelDimension.FIRST , **lowercase_ , ):
"""simple docstring"""
UpperCAmelCase_ : Tuple = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase_ : Dict = resample if resample is not None else self.resample
UpperCAmelCase_ : Union[str, Any] = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCAmelCase_ : List[Any] = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase_ : int = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase_ : Tuple = do_normalize if do_normalize is not None else self.do_normalize
UpperCAmelCase_ : Optional[int] = image_mean if image_mean is not None else self.image_mean
UpperCAmelCase_ : Optional[Any] = image_std if image_std is not None else self.image_std
UpperCAmelCase_ : Tuple = size if size is not None else self.size
UpperCAmelCase_ : Union[str, Any] = get_size_dict(lowercase_ )
UpperCAmelCase_ : Optional[int] = crop_size if crop_size is not None else self.crop_size
UpperCAmelCase_ : Union[str, Any] = get_size_dict(lowercase_ , param_name="crop_size" )
UpperCAmelCase_ : Union[str, Any] = make_list_of_images(lowercase_ )
if not valid_images(lowercase_ ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
        if do_resize and (size is None or resample is None):
raise ValueError("Size and resample must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# All transformations expect numpy arrays.
UpperCAmelCase_ : Dict = [to_numpy_array(lowercase_ ) for image in images]
if do_resize:
UpperCAmelCase_ : Dict = [self.resize(image=lowercase_ , size=lowercase_ , resample=lowercase_ ) for image in images]
if do_center_crop:
UpperCAmelCase_ : Dict = [self.center_crop(image=lowercase_ , size=lowercase_ ) for image in images]
if do_rescale:
UpperCAmelCase_ : str = [self.rescale(image=lowercase_ , scale=lowercase_ ) for image in images]
if do_normalize:
UpperCAmelCase_ : List[str] = [self.normalize(image=lowercase_ , mean=lowercase_ , std=lowercase_ ) for image in images]
UpperCAmelCase_ : Tuple = [to_channel_dimension_format(lowercase_ , lowercase_ ) for image in images]
UpperCAmelCase_ : List[str] = {"pixel_values": images}
return BatchFeature(data=lowercase_ , tensor_type=lowercase_ )
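# Standalone sketch of the rescale-then-normalize arithmetic applied above
# (values are illustrative; the real processor applies mean/std per channel):
import numpy as np

def _rescale_and_normalize(image: np.ndarray, scale: float, mean: float, std: float) -> np.ndarray:
    image = image * scale        # e.g. scale = 1 / 255 maps uint8 pixels into [0, 1]
    return (image - mean) / std  # standardize around the dataset statistics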
| 370
|
"""simple docstring"""
import warnings
from ...utils import is_sklearn_available, requires_backends
if is_sklearn_available():
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import fa_score, matthews_corrcoef
_a = (
'This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate '
'library. You can have a look at this example script for pointers: '
'https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py'
)
def simple_accuracy( __lowerCamelCase, __lowerCamelCase ):
warnings.warn(__lowerCamelCase, __lowerCamelCase )
requires_backends(__lowerCamelCase, "sklearn" )
return (preds == labels).mean()
def acc_and_fa( __lowerCamelCase, __lowerCamelCase ):
warnings.warn(__lowerCamelCase, __lowerCamelCase )
requires_backends(__lowerCamelCase, "sklearn" )
UpperCAmelCase_ : Optional[Any] = simple_accuracy(__lowerCamelCase, __lowerCamelCase )
UpperCAmelCase_ : List[Any] = fa_score(y_true=__lowerCamelCase, y_pred=__lowerCamelCase )
return {
"acc": acc,
"f1": fa,
"acc_and_f1": (acc + fa) / 2,
}
def pearson_and_spearman( __lowerCamelCase, __lowerCamelCase ):
warnings.warn(__lowerCamelCase, __lowerCamelCase )
requires_backends(__lowerCamelCase, "sklearn" )
UpperCAmelCase_ : Any = pearsonr(__lowerCamelCase, __lowerCamelCase )[0]
UpperCAmelCase_ : Optional[Any] = spearmanr(__lowerCamelCase, __lowerCamelCase )[0]
return {
"pearson": pearson_corr,
"spearmanr": spearman_corr,
"corr": (pearson_corr + spearman_corr) / 2,
}
def glue_compute_metrics( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
warnings.warn(__lowerCamelCase, __lowerCamelCase )
requires_backends(__lowerCamelCase, "sklearn" )
assert len(__lowerCamelCase ) == len(__lowerCamelCase ), f"""Predictions and labels have mismatched lengths {len(__lowerCamelCase )} and {len(__lowerCamelCase )}"""
if task_name == "cola":
return {"mcc": matthews_corrcoef(__lowerCamelCase, __lowerCamelCase )}
elif task_name == "sst-2":
return {"acc": simple_accuracy(__lowerCamelCase, __lowerCamelCase )}
elif task_name == "mrpc":
return acc_and_fa(__lowerCamelCase, __lowerCamelCase )
elif task_name == "sts-b":
return pearson_and_spearman(__lowerCamelCase, __lowerCamelCase )
elif task_name == "qqp":
return acc_and_fa(__lowerCamelCase, __lowerCamelCase )
elif task_name == "mnli":
return {"mnli/acc": simple_accuracy(__lowerCamelCase, __lowerCamelCase )}
elif task_name == "mnli-mm":
return {"mnli-mm/acc": simple_accuracy(__lowerCamelCase, __lowerCamelCase )}
elif task_name == "qnli":
return {"acc": simple_accuracy(__lowerCamelCase, __lowerCamelCase )}
elif task_name == "rte":
return {"acc": simple_accuracy(__lowerCamelCase, __lowerCamelCase )}
elif task_name == "wnli":
return {"acc": simple_accuracy(__lowerCamelCase, __lowerCamelCase )}
elif task_name == "hans":
return {"acc": simple_accuracy(__lowerCamelCase, __lowerCamelCase )}
else:
raise KeyError(__lowerCamelCase )
def xnli_compute_metrics( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
warnings.warn(__lowerCamelCase, __lowerCamelCase )
requires_backends(__lowerCamelCase, "sklearn" )
if len(__lowerCamelCase ) != len(__lowerCamelCase ):
raise ValueError(f"""Predictions and labels have mismatched lengths {len(__lowerCamelCase )} and {len(__lowerCamelCase )}""" )
if task_name == "xnli":
return {"acc": simple_accuracy(__lowerCamelCase, __lowerCamelCase )}
else:
raise KeyError(__lowerCamelCase )
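# Tiny standalone sketch of the acc/F1 combination computed above
# (requires scikit-learn; the input arrays are illustrative):
import numpy as np
from sklearn.metrics import f1_score

def _acc_and_f1_demo(preds: np.ndarray, labels: np.ndarray) -> dict:
    acc = float((preds == labels).mean())
    f1 = float(f1_score(y_true=labels, y_pred=preds))
    return {"acc": acc, "f1": f1, "acc_and_f1": (acc + f1) / 2}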
| 23
| 0
|
"""simple docstring"""
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class A_ :
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[str]
SCREAMING_SNAKE_CASE__ : Optional[str] = None
# Automatically constructed
SCREAMING_SNAKE_CASE__ : ClassVar[str] = "dict"
SCREAMING_SNAKE_CASE__ : ClassVar[Any] = None
SCREAMING_SNAKE_CASE__ : str = field(default="""Translation""" ,init=lowercase__ ,repr=lowercase__ )
def __call__( self ):
"""simple docstring"""
return pa.struct({lang: pa.string() for lang in sorted(self.languages )} )
def UpperCamelCase__ ( self ):
"""simple docstring"""
from .features import Value
return {k: Value("string" ) for k in sorted(self.languages )}
@dataclass
class A_ :
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[List] = None
SCREAMING_SNAKE_CASE__ : Optional[int] = None
SCREAMING_SNAKE_CASE__ : Optional[str] = None
# Automatically constructed
SCREAMING_SNAKE_CASE__ : ClassVar[str] = "dict"
SCREAMING_SNAKE_CASE__ : ClassVar[Any] = None
SCREAMING_SNAKE_CASE__ : str = field(default="""TranslationVariableLanguages""" ,init=lowercase__ ,repr=lowercase__ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : List[str] = sorted(set(self.languages ) ) if self.languages else None
UpperCAmelCase_ : List[str] = len(self.languages ) if self.languages else None
def __call__( self ):
"""simple docstring"""
return pa.struct({"language": pa.list_(pa.string() ), "translation": pa.list_(pa.string() )} )
def UpperCamelCase__ ( self , lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = set(self.languages )
if self.languages and set(lowercase_ ) - lang_set:
raise ValueError(
F"""Some languages in example ({", ".join(sorted(set(lowercase_ ) - lang_set ) )}) are not in valid set ({", ".join(lowercase_ )}).""" )
# Convert dictionary into tuples, splitting out cases where there are
# multiple translations for a single language.
UpperCAmelCase_ : Optional[Any] = []
for lang, text in translation_dict.items():
if isinstance(lowercase_ , lowercase_ ):
translation_tuples.append((lang, text) )
else:
translation_tuples.extend([(lang, el) for el in text] )
# Ensure translations are in ascending order by language code.
UpperCAmelCase_ : Union[str, Any] = zip(*sorted(lowercase_ ) )
return {"language": languages, "translation": translations}
def UpperCamelCase__ ( self ):
"""simple docstring"""
from .features import Sequence, Value
return {
"language": Sequence(Value("string" ) ),
"translation": Sequence(Value("string" ) ),
}
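# Worked standalone example of the flattening performed above (illustrative data):
def _flatten_translations_demo() -> dict:
    translation_dict = {"en": "the cat", "fr": ["le chat", "la chatte"]}
    pairs = []
    for lang, text in translation_dict.items():
        if isinstance(text, str):
            pairs.append((lang, text))
        else:
            pairs.extend((lang, el) for el in text)
    # sorting orders pairs by (language, text) before unzipping
    languages, translations = zip(*sorted(pairs))
    # -> languages = ("en", "fr", "fr"),
    #    translations = ("the cat", "la chatte", "le chat")
    return {"language": languages, "translation": translations}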
| 371
|
"""simple docstring"""
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_a = logging.get_logger(__name__)
_a = {'vocab_file': 'vocab.json'}
_a = {
'vocab_file': {
'mgp-str': 'https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json',
}
}
_a = {'mgp-str': 27}
class A_ (lowercase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE__ : List[str] = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE__ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self , lowercase_ , lowercase_="[GO]" , lowercase_="[GO]" , lowercase_="[s]" , lowercase_="[GO]" , **lowercase_ ):
"""simple docstring"""
super().__init__(
unk_token=lowercase_ , bos_token=lowercase_ , eos_token=lowercase_ , pad_token=lowercase_ , **lowercase_ , )
with open(lowercase_ , encoding="utf-8" ) as vocab_handle:
UpperCAmelCase_ : Dict = json.load(lowercase_ )
UpperCAmelCase_ : Dict = {v: k for k, v in self.vocab.items()}
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return len(self.vocab )
def UpperCamelCase__ ( self ):
"""simple docstring"""
return dict(self.vocab , **self.added_tokens_encoder )
def UpperCamelCase__ ( self , lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = []
for s in text:
char_tokens.extend(lowercase_ )
return char_tokens
def UpperCamelCase__ ( self , lowercase_ ):
"""simple docstring"""
return self.vocab.get(lowercase_ , self.vocab.get(self.unk_token ) )
def UpperCamelCase__ ( self , lowercase_ ):
"""simple docstring"""
return self.decoder.get(lowercase_ )
def UpperCamelCase__ ( self , lowercase_ , lowercase_ = None ):
"""simple docstring"""
if not os.path.isdir(lowercase_ ):
logger.error("Vocabulary path ({}) should be a directory".format(lowercase_ ) )
return
UpperCAmelCase_ : Optional[int] = os.path.join(
lowercase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
with open(lowercase_ , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.vocab , indent=2 , sort_keys=lowercase_ , ensure_ascii=lowercase_ ) + "\n" )
return (vocab_file,)
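# Minimal standalone sketch of the character-level encoding above
# (the vocabulary is illustrative):
def _char_encode_demo(text: str, vocab: dict, unk_id: int = 0) -> list:
    return [vocab.get(ch, unk_id) for ch in text]

# _char_encode_demo("abc", {"a": 3, "b": 4}) -> [3, 4, 0]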
| 23
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
SCREAMING_SNAKE_CASE : Optional[Any] = {"""configuration_dpt""": ["""DPT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """DPTConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : Optional[int] = ["""DPTFeatureExtractor"""]
SCREAMING_SNAKE_CASE : Any = ["""DPTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : Optional[Any] = [
"""DPT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""DPTForDepthEstimation""",
"""DPTForSemanticSegmentation""",
"""DPTModel""",
"""DPTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE : str = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 24
|
"""simple docstring"""
def solution() ->int:
    """simple docstring"""
    return [
        a * b * (1_000 - a - b)
        for a in range(1 , 999 )
        for b in range(a , 999 )
        if (a * a + b * b == (1_000 - a - b) ** 2)
    ][0]
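# The unique such triplet is (200, 375, 425): 200**2 + 375**2 == 425**2 and
# 200 + 375 + 425 == 1000, so the answer is the product 31875000.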
if __name__ == "__main__":
print(F'{solution() = }')
| 24
| 1
|
"""simple docstring"""
from __future__ import annotations
SCREAMING_SNAKE_CASE : Optional[int] = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
def search( _snake_case : list[list[int]] , _snake_case : list[int] , _snake_case : list[int] , _snake_case : int , _snake_case : list[list[int]] , ) ->tuple[list[list[int]], list[list[int]]]:
"""simple docstring"""
__snake_case : List[str] = [
[0 for col in range(len(grid[0] ) )] for row in range(len(_snake_case ) )
    ] # the closed grid: 1 marks cells that have already been expanded
__snake_case : Tuple = 1
__snake_case : str = [
[0 for col in range(len(grid[0] ) )] for row in range(len(_snake_case ) )
] # the action grid
__snake_case : Tuple = init[0]
__snake_case : str = init[1]
__snake_case : List[str] = 0
    __snake_case : int = g + heuristic[x][y]  # estimated total cost: path cost so far plus the heuristic to the goal
__snake_case : Tuple = [[f, g, x, y]]
__snake_case : Union[str, Any] = False # flag that is set when search is complete
__snake_case : Optional[Any] = False # flag set if we can't find expand
while not found and not resign:
if len(_snake_case ) == 0:
raise ValueError('''Algorithm is unable to find solution''' )
else: # to choose the least costliest action so as to move closer to the goal
cell.sort()
cell.reverse()
__snake_case : Dict = cell.pop()
__snake_case : Optional[Any] = next_cell[2]
__snake_case : Tuple = next_cell[3]
__snake_case : Tuple = next_cell[1]
if x == goal[0] and y == goal[1]:
__snake_case : Dict = True
else:
for i in range(len(_snake_case ) ): # to try out different valid actions
__snake_case : int = x + DIRECTIONS[i][0]
__snake_case : int = y + DIRECTIONS[i][1]
if xa >= 0 and xa < len(_snake_case ) and ya >= 0 and ya < len(grid[0] ):
if closed[xa][ya] == 0 and grid[xa][ya] == 0:
__snake_case : Optional[Any] = g + cost
__snake_case : str = ga + heuristic[xa][ya]
cell.append([fa, ga, xa, ya] )
__snake_case : Dict = 1
__snake_case : Union[str, Any] = i
__snake_case : str = []
__snake_case : List[Any] = goal[0]
__snake_case : str = goal[1]
invpath.append([x, y] ) # we get the reverse path from here
while x != init[0] or y != init[1]:
__snake_case : List[Any] = x - DIRECTIONS[action[x][y]][0]
__snake_case : List[str] = y - DIRECTIONS[action[x][y]][1]
__snake_case : Dict = xa
__snake_case : Union[str, Any] = ya
invpath.append([x, y] )
__snake_case : Union[str, Any] = []
for i in range(len(_snake_case ) ):
path.append(invpath[len(_snake_case ) - 1 - i] )
return path, action
if __name__ == "__main__":
SCREAMING_SNAKE_CASE : List[str] = [
[0, 1, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0], # 0s are free cells whereas 1s are obstacles
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 0, 0, 0, 1, 0],
]
SCREAMING_SNAKE_CASE : List[Any] = [0, 0]
# all coordinates are given in format [y,x]
SCREAMING_SNAKE_CASE : Optional[int] = [len(grid) - 1, len(grid[0]) - 1]
SCREAMING_SNAKE_CASE : Any = 1
# the cost map which pushes the path closer to the goal
SCREAMING_SNAKE_CASE : Dict = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
for i in range(len(grid)):
for j in range(len(grid[0])):
SCREAMING_SNAKE_CASE : List[Any] = abs(i - goal[0]) + abs(j - goal[1])
if grid[i][j] == 1:
# added extra penalty in the heuristic map
SCREAMING_SNAKE_CASE : Optional[int] = 99
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = search(grid, init, goal, cost, heuristic)
print("""ACTION MAP""")
for i in range(len(action)):
print(action[i])
for i in range(len(path)):
print(path[i])
| 24
|
"""simple docstring"""
def solution(n : int = 100 ) ->int:
    """simple docstring"""
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares )
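# For the default n = 100: the square of the sum is 5050**2 = 25502500, the sum
# of the squares is 100 * 101 * 201 / 6 = 338350, and the difference is 25164150.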
if __name__ == "__main__":
print(F'{solution() = }')
| 24
| 1
|
"""simple docstring"""
import json
import sys
def format_json_to_md(input_json_file , output_md_file ) ->None:
    """simple docstring"""
    with open(input_json_file , encoding='''utf-8''' ) as f:
        results = json.load(f )
    output_md = ['''<details>''', '''<summary>Show updated benchmarks!</summary>''', ''' ''']
    for benchmark_name in sorted(results ):
        benchmark_res = results[benchmark_name]
        benchmark_file_name = benchmark_name.split('''/''' )[-1]
        output_md.append(f"""### Benchmark: {benchmark_file_name}""" )
        title = '''| metric |'''
        lines = '''|--------|'''
        value = '''| new / old (diff) |'''
        for metric_name in sorted(benchmark_res ):
            metric_vals = benchmark_res[metric_name]
            new_val = metric_vals['''new''']
            old_val = metric_vals.get('''old''' , None )
            dif_val = metric_vals.get('''diff''' , None )
            val_str = f""" {new_val:f}""" if isinstance(new_val , (int, float) ) else '''None'''
            if old_val is not None:
                val_str += f""" / {old_val:f}""" if isinstance(old_val , (int, float) ) else "None"
            if dif_val is not None:
                val_str += f""" ({dif_val:f})""" if isinstance(dif_val , (int, float) ) else "None"
            title += " " + metric_name + " |"
            lines += "---|"
            value += val_str + " |"
        output_md += [title, lines, value, " "]
    output_md.append('''</details>''' )
    with open(output_md_file , '''w''' , encoding='''utf-8''' ) as f:
        f.writelines('''\n'''.join(output_md ) )
if __name__ == "__main__":
    input_json_file = sys.argv[1]
    output_md_file = sys.argv[2]
format_json_to_md(input_json_file, output_md_file)
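    # Example invocation (file names are illustrative):
    #   python format_benchmark_output.py raw_benchmark_results.json benchmark_report.md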
| 24
|
"""simple docstring"""
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
SCREAMING_SNAKE_CASE : int = datasets.utils.logging.get_logger(__name__)
@dataclass
class _UpperCAmelCase ( datasets.BuilderConfig ):
'''simple docstring'''
lowerCamelCase__ =10000
lowerCamelCase__ =None
lowerCamelCase__ =None
class _UpperCAmelCase ( datasets.ArrowBasedBuilder ):
'''simple docstring'''
lowerCamelCase__ =ParquetConfig
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return datasets.DatasetInfo(features=self.config.features )
def SCREAMING_SNAKE_CASE (self , a_ ):
'''simple docstring'''
if not self.config.data_files:
raise ValueError(f"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
__snake_case : int = dl_manager.download_and_extract(self.config.data_files )
if isinstance(a_ , (str, list, tuple) ):
__snake_case : Union[str, Any] = data_files
if isinstance(a_ , a_ ):
__snake_case : Union[str, Any] = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
__snake_case : List[Any] = [dl_manager.iter_files(a_ ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files} )]
__snake_case : int = []
for split_name, files in data_files.items():
if isinstance(a_ , a_ ):
__snake_case : List[Any] = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
__snake_case : int = [dl_manager.iter_files(a_ ) for file in files]
            # Infer features if they are stored in the arrow schema
if self.info.features is None:
for file in itertools.chain.from_iterable(a_ ):
with open(a_ , '''rb''' ) as f:
__snake_case : Any = datasets.Features.from_arrow_schema(pq.read_schema(a_ ) )
break
splits.append(datasets.SplitGenerator(name=a_ , gen_kwargs={'''files''': files} ) )
return splits
def SCREAMING_SNAKE_CASE (self , a_ ):
'''simple docstring'''
if self.info.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
__snake_case : List[Any] = table_cast(a_ , self.info.features.arrow_schema )
return pa_table
def SCREAMING_SNAKE_CASE (self , a_ ):
'''simple docstring'''
__snake_case : List[Any] = self.info.features.arrow_schema if self.info.features is not None else None
if self.info.features is not None and self.config.columns is not None:
if sorted(field.name for field in schema ) != sorted(self.config.columns ):
raise ValueError(
f"""Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'""" )
for file_idx, file in enumerate(itertools.chain.from_iterable(a_ ) ):
with open(a_ , '''rb''' ) as f:
__snake_case : int = pq.ParquetFile(a_ )
try:
for batch_idx, record_batch in enumerate(
parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns ) ):
__snake_case : Dict = pa.Table.from_batches([record_batch] )
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield f"""{file_idx}_{batch_idx}""", self._cast_table(a_ )
except ValueError as e:
logger.error(f"""Failed to read file '{file}' with error {type(a_ )}: {e}""" )
raise
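# Standalone sketch of the batched Parquet read pattern used above
# (requires pyarrow; the file path is illustrative):
import pyarrow as pa
import pyarrow.parquet as pq

def _iter_parquet_tables(path: str, batch_size: int = 10_000):
    parquet_file = pq.ParquetFile(path)
    for record_batch in parquet_file.iter_batches(batch_size=batch_size):
        # each record batch becomes a small, independently processable table
        yield pa.Table.from_batches([record_batch])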
| 24
| 1
|
"""simple docstring"""
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionImgaImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class _UpperCAmelCase ( __snake_case, unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ ='hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline'
def SCREAMING_SNAKE_CASE (self , a_=0 ):
'''simple docstring'''
__snake_case : Tuple = floats_tensor((1, 3, 1_28, 1_28) , rng=random.Random(a_ ) )
__snake_case : Optional[int] = np.random.RandomState(a_ )
__snake_case : List[Any] = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 3,
'''strength''': 0.75,
'''guidance_scale''': 7.5,
'''output_type''': '''numpy''',
}
return inputs
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Dict = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
pipe.set_progress_bar_config(disable=a_ )
__snake_case : Optional[int] = self.get_dummy_inputs()
__snake_case : List[Any] = pipe(**a_ ).images
__snake_case : Optional[Any] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 1_28, 1_28, 3)
__snake_case : str = np.array([0.6_9643, 0.5_8484, 0.5_0314, 0.5_8760, 0.5_5368, 0.5_9643, 0.5_1529, 0.4_1217, 0.4_9087] )
assert np.abs(image_slice - expected_slice ).max() < 1E-1
    def SCREAMING_SNAKE_CASE (self ):
        '''simple docstring'''
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=True )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 1_28, 1_28, 3)
        expected_slice = np.array([0.6_1737, 0.5_4642, 0.5_3183, 0.5_4465, 0.5_2742, 0.6_0525, 0.4_9969, 0.4_0655, 0.4_8154] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
    def SCREAMING_SNAKE_CASE (self ):
        '''simple docstring'''
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=None )
        # warmup pass to apply optimizations
        _ = pipe(**self.get_dummy_inputs() )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 1_28, 1_28, 3)
        expected_slice = np.array([0.5_2761, 0.5_9977, 0.4_9033, 0.4_9619, 0.5_4282, 0.5_0311, 0.4_7600, 0.4_0918, 0.4_5203] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
    def SCREAMING_SNAKE_CASE (self ):
        '''simple docstring'''
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 1_28, 1_28, 3)
        expected_slice = np.array([0.5_2911, 0.6_0004, 0.4_9229, 0.4_9805, 0.5_4502, 0.5_0680, 0.4_7777, 0.4_1028, 0.4_5304] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
    def SCREAMING_SNAKE_CASE (self ):
        '''simple docstring'''
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 1_28, 1_28, 3)
        expected_slice = np.array([0.5_2911, 0.6_0004, 0.4_9229, 0.4_9805, 0.5_4502, 0.5_0680, 0.4_7777, 0.4_1028, 0.4_5304] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
    def SCREAMING_SNAKE_CASE (self ):
        '''simple docstring'''
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 1_28, 1_28, 3)
        expected_slice = np.array([0.6_5331, 0.5_8277, 0.4_8204, 0.5_6059, 0.5_3665, 0.5_6235, 0.5_0969, 0.4_0009, 0.4_6552] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
@nightly
@require_onnxruntime
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
    @property
    def gpu_provider(self ):
        '''simple docstring'''
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )
    @property
    def gpu_options(self ):
        '''simple docstring'''
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
    def SCREAMING_SNAKE_CASE (self ):
        '''simple docstring'''
        init_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/img2img/sketch-mountains-input.jpg''' )
        init_image = init_image.resize((7_68, 5_12) )
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            '''CompVis/stable-diffusion-v1-4''' , revision='''onnx''' , safety_checker=None , feature_extractor=None , provider=self.gpu_provider , sess_options=self.gpu_options , )
        pipe.set_progress_bar_config(disable=None )
        prompt = '''A fantasy landscape, trending on artstation'''
        generator = np.random.RandomState(0 )
        output = pipe(
            prompt=prompt , image=init_image , strength=0.75 , guidance_scale=7.5 , num_inference_steps=10 , generator=generator , output_type='''np''' , )
        images = output.images
        image_slice = images[0, 2_55:2_58, 3_83:3_86, -1]
        assert images.shape == (1, 5_12, 7_68, 3)
        expected_slice = np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019] )
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
    def SCREAMING_SNAKE_CASE (self ):
        '''simple docstring'''
        init_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/img2img/sketch-mountains-input.jpg''' )
        init_image = init_image.resize((7_68, 5_12) )
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            '''runwayml/stable-diffusion-v1-5''' , subfolder='''scheduler''' , revision='''onnx''' )
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            '''runwayml/stable-diffusion-v1-5''' , revision='''onnx''' , scheduler=lms_scheduler , safety_checker=None , feature_extractor=None , provider=self.gpu_provider , sess_options=self.gpu_options , )
        pipe.set_progress_bar_config(disable=None )
        prompt = '''A fantasy landscape, trending on artstation'''
        generator = np.random.RandomState(0 )
        output = pipe(
            prompt=prompt , image=init_image , strength=0.75 , guidance_scale=7.5 , num_inference_steps=20 , generator=generator , output_type='''np''' , )
        images = output.images
        image_slice = images[0, 2_55:2_58, 3_83:3_86, -1]
        assert images.shape == (1, 5_12, 7_68, 3)
        expected_slice = np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431] )
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
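# A short sketch of the scheduler-swapping pattern the tests above rely on: any
# compatible scheduler can be rebuilt from the current one via `from_config`.
# The checkpoint id is the same dummy hub checkpoint used in the fast tests;
# running this requires `diffusers` with onnxruntime support installed.
if __name__ == "__main__":
    pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
        "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline", provider="CPUExecutionProvider"
    )
    pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)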
"""simple docstring"""
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
    def check_results_dict_not_empty(self , results ):
        '''simple docstring'''
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result['''bs'''] , model_result['''ss'''] ):
                result = model_result['''result'''][batch_size][sequence_length]
                self.assertIsNotNone(result )
    def SCREAMING_SNAKE_CASE (self ):
        '''simple docstring'''
        MODEL_ID = '''sshleifer/tiny-gpt2'''
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID] , training=False , inference=True , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=True , multi_process=False , )
        benchmark = TensorFlowBenchmark(benchmark_args )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )
    def SCREAMING_SNAKE_CASE (self ):
        '''simple docstring'''
        MODEL_ID = '''sgugger/tiny-distilbert-classification'''
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID] , training=False , inference=True , sequence_lengths=[8] , batch_sizes=[1] , multi_process=False , only_pretrain_model=True , )
        benchmark = TensorFlowBenchmark(benchmark_args )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )
    def SCREAMING_SNAKE_CASE (self ):
        '''simple docstring'''
        MODEL_ID = '''sshleifer/tiny-gpt2'''
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID] , training=False , inference=True , sequence_lengths=[8] , batch_sizes=[1] , multi_process=False , )
        benchmark = TensorFlowBenchmark(benchmark_args )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )
    def SCREAMING_SNAKE_CASE (self ):
        '''simple docstring'''
        MODEL_ID = '''sshleifer/tiny-gpt2'''
        config = AutoConfig.from_pretrained(MODEL_ID )
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID] , training=False , inference=True , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=True , multi_process=False , )
        benchmark = TensorFlowBenchmark(benchmark_args , [config] )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )
    def SCREAMING_SNAKE_CASE (self ):
        '''simple docstring'''
        MODEL_ID = '''sshleifer/tiny-gpt2'''
        config = AutoConfig.from_pretrained(MODEL_ID )
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID] , training=False , inference=True , sequence_lengths=[8] , batch_sizes=[1] , multi_process=False , )
        benchmark = TensorFlowBenchmark(benchmark_args , [config] )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )
    def SCREAMING_SNAKE_CASE (self ):
        '''simple docstring'''
        MODEL_ID = '''sshleifer/tiny-gpt2'''
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID] , training=True , inference=False , sequence_lengths=[8] , batch_sizes=[1] , multi_process=False , )
        benchmark = TensorFlowBenchmark(benchmark_args )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result )
        self.check_results_dict_not_empty(results.memory_train_result )
    def SCREAMING_SNAKE_CASE (self ):
        '''simple docstring'''
        MODEL_ID = '''sshleifer/tiny-gpt2'''
        config = AutoConfig.from_pretrained(MODEL_ID )
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID] , training=True , inference=False , sequence_lengths=[8] , batch_sizes=[1] , multi_process=False , )
        benchmark = TensorFlowBenchmark(benchmark_args , [config] )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result )
        self.check_results_dict_not_empty(results.memory_train_result )
    def SCREAMING_SNAKE_CASE (self ):
        '''simple docstring'''
        MODEL_ID = '''patrickvonplaten/t5-tiny-random'''
        config = AutoConfig.from_pretrained(MODEL_ID )
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID] , training=False , inference=True , sequence_lengths=[8] , batch_sizes=[1] , multi_process=False , )
        benchmark = TensorFlowBenchmark(benchmark_args , configs=[config] )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices('''GPU''' ) ) == 0 , '''Cannot do xla on CPU.''' )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
        MODEL_ID = '''sshleifer/tiny-gpt2'''
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID] , training=False , inference=True , sequence_lengths=[8] , batch_sizes=[1] , use_xla=True , multi_process=False , )
        benchmark = TensorFlowBenchmark(benchmark_args )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )
    def SCREAMING_SNAKE_CASE (self ):
        '''simple docstring'''
        MODEL_ID = '''sshleifer/tiny-gpt2'''
        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = TensorFlowBenchmarkArguments(
                models=[MODEL_ID] , inference=True , save_to_csv=True , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(tmp_dir , '''inf_time.csv''' ) , inference_memory_csv_file=os.path.join(tmp_dir , '''inf_mem.csv''' ) , env_info_csv_file=os.path.join(tmp_dir , '''env.csv''' ) , multi_process=False , )
            benchmark = TensorFlowBenchmark(benchmark_args )
            benchmark.run()
            self.assertTrue(Path(os.path.join(tmp_dir , '''inf_time.csv''' ) ).exists() )
            self.assertTrue(Path(os.path.join(tmp_dir , '''inf_mem.csv''' ) ).exists() )
            self.assertTrue(Path(os.path.join(tmp_dir , '''env.csv''' ) ).exists() )
    def SCREAMING_SNAKE_CASE (self ):
        '''simple docstring'''
        MODEL_ID = '''sshleifer/tiny-gpt2'''
        def _check_summary_is_not_empty(summary ):
            self.assertTrue(hasattr(summary , '''sequential''' ) )
            self.assertTrue(hasattr(summary , '''cumulative''' ) )
            self.assertTrue(hasattr(summary , '''current''' ) )
            self.assertTrue(hasattr(summary , '''total''' ) )
        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = TensorFlowBenchmarkArguments(
                models=[MODEL_ID] , inference=True , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(tmp_dir , '''log.txt''' ) , log_print=True , trace_memory_line_by_line=True , eager_mode=True , multi_process=False , )
            benchmark = TensorFlowBenchmark(benchmark_args )
            result = benchmark.run()
            _check_summary_is_not_empty(result.inference_summary )
            self.assertTrue(Path(os.path.join(tmp_dir , '''log.txt''' ) ).exists() )
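# A minimal usage sketch of the benchmark API exercised above, outside unittest;
# the tiny model id matches the one used throughout the tests, and the argument
# values mirror the inference tests rather than any prescribed configuration.
if __name__ == "__main__":
    sketch_args = TensorFlowBenchmarkArguments(
        models=["sshleifer/tiny-gpt2"], inference=True, training=False,
        sequence_lengths=[8], batch_sizes=[1], multi_process=False,
    )
    sketch_results = TensorFlowBenchmark(sketch_args).run()
    print(sketch_results.time_inference_result)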
"""simple docstring"""
from __future__ import annotations
Matrix = list[list[int]]
# assigning initial values to the grid
initial_grid: Matrix = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
no_solution: Matrix = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def is_safe(grid: Matrix , row: int , column: int , n: int ) ->bool:
"""simple docstring"""
    for i in range(9 ):
        if grid[row][i] == n or grid[i][column] == n:
return False
for i in range(3 ):
for j in range(3 ):
if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
return False
return True
def find_empty_location(grid: Matrix ) ->tuple[int, int] | None:
    """simple docstring"""
    for i in range(9 ):
        for j in range(9 ):
            if grid[i][j] == 0:
                return i, j
    return None
def sudoku(grid: Matrix ) ->Matrix | None:
    """simple docstring"""
    if location := find_empty_location(grid ):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid
    for digit in range(1 , 10 ):
        if is_safe(grid , row , column , digit ):
            grid[row][column] = digit
            if sudoku(grid ) is not None:
                return grid
            grid[row][column] = 0
    return None
def print_solution(grid: Matrix ) ->None:
    """simple docstring"""
    for row in grid:
        for cell in row:
            print(cell , end=''' ''' )
print()
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print("""\nExample grid:\n""" + """=""" * 20)
print_solution(example_grid)
print("""\nExample grid solution:""")
        solution = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print("""Cannot find a solution.""")
"""simple docstring"""
import logging
import os
import threading
import time
try:
import warnings
except ImportError:
    warnings = None
try:
import msvcrt
except ImportError:
    msvcrt = None
try:
import fcntl
except ImportError:
    fcntl = None
# Backward compatibility
# ------------------------------------------------
try:
TimeoutError
except NameError:
    TimeoutError = OSError
# Data
# ------------------------------------------------
__all__ = [
"""Timeout""",
"""BaseFileLock""",
"""WindowsFileLock""",
"""UnixFileLock""",
"""SoftFileLock""",
"""FileLock""",
]
__version__ = """3.0.12"""
_logger = None
def logger():
    """simple docstring"""
    global _logger
    _logger = _logger or logging.getLogger(__name__ )
    return _logger
class Timeout( TimeoutError ):
    '''simple docstring'''
    def __init__(self , lock_file ):
        '''simple docstring'''
        self.lock_file = lock_file
        return None
    def __str__(self ):
        '''simple docstring'''
        temp = f"""The file lock '{self.lock_file}' could not be acquired."""
        return temp
class _Acquire_ReturnProxy :
    '''simple docstring'''
    def __init__(self , lock ):
        '''simple docstring'''
        self.lock = lock
        return None
    def __enter__(self ):
        '''simple docstring'''
        return self.lock
    def __exit__(self , exc_type , exc_value , traceback ):
        '''simple docstring'''
        self.lock.release()
        return None
class BaseFileLock :
    '''simple docstring'''
    def __init__(self , lock_file , timeout=-1 , max_filename_length=None ):
        '''simple docstring'''
        max_filename_length = max_filename_length if max_filename_length is not None else 2_55
        # Hash the filename if it's too long
        lock_file = self.hash_filename_if_too_long(lock_file , max_filename_length )
        # The path to the lock file.
        self._lock_file = lock_file
        # The file descriptor for the *_lock_file* as it is returned by the
        # os.open() function.
        # This file lock is only NOT None, if the object currently holds the
        # lock.
        self._lock_file_fd = None
        # The default timeout value.
        self.timeout = timeout
        # We use this lock primarily for the lock counter.
        self._thread_lock = threading.Lock()
        # The lock counter is used for implementing the nested locking
        # mechanism. Whenever the lock is acquired, the counter is increased and
        # the lock is only released, when this value is 0 again.
        self._lock_counter = 0
        return None
    @property
    def lock_file(self ):
        '''simple docstring'''
        return self._lock_file
    @property
    def timeout(self ):
        '''simple docstring'''
        return self._timeout
    @timeout.setter
    def timeout(self , value ):
        '''simple docstring'''
        self._timeout = float(value )
        return None
    def _acquire(self ):
        '''simple docstring'''
        raise NotImplementedError()
    def _release(self ):
        '''simple docstring'''
        raise NotImplementedError()
    @property
    def is_locked(self ):
        '''simple docstring'''
        return self._lock_file_fd is not None
    def acquire(self , timeout=None , poll_intervall=0.05 ):
        '''simple docstring'''
        if timeout is None:
            timeout = self.timeout
        # Increment the number right at the beginning.
        # We can still undo it, if something fails.
        with self._thread_lock:
            self._lock_counter += 1
        lock_id = id(self )
        lock_filename = self._lock_file
        start_time = time.time()
        try:
            while True:
                with self._thread_lock:
                    if not self.is_locked:
                        logger().debug(f"""Attempting to acquire lock {lock_id} on {lock_filename}""" )
                        self._acquire()
                if self.is_locked:
                    logger().debug(f"""Lock {lock_id} acquired on {lock_filename}""" )
                    break
                elif timeout >= 0 and time.time() - start_time > timeout:
                    logger().debug(f"""Timeout on acquiring lock {lock_id} on {lock_filename}""" )
                    raise Timeout(self._lock_file )
                else:
                    logger().debug(
                        f"""Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ...""" )
                    time.sleep(poll_intervall )
        except:  # noqa
            # Something did go wrong, so decrement the counter.
            with self._thread_lock:
                self._lock_counter = max(0 , self._lock_counter - 1 )
            raise
        return _Acquire_ReturnProxy(lock=self )
    def release(self , force=False ):
        '''simple docstring'''
        with self._thread_lock:
            if self.is_locked:
                self._lock_counter -= 1
                if self._lock_counter == 0 or force:
                    lock_id = id(self )
                    lock_filename = self._lock_file
                    logger().debug(f"""Attempting to release lock {lock_id} on {lock_filename}""" )
                    self._release()
                    self._lock_counter = 0
                    logger().debug(f"""Lock {lock_id} released on {lock_filename}""" )
        return None
def __enter__(self ):
'''simple docstring'''
self.acquire()
return self
    def __exit__(self , exc_type , exc_value , traceback ):
        '''simple docstring'''
        self.release()
        return None
    def __del__(self ):
        '''simple docstring'''
        self.release(force=True )
        return None
    def hash_filename_if_too_long(self , path , max_length ):
        '''simple docstring'''
        filename = os.path.basename(path )
        if len(filename ) > max_length and max_length > 0:
            dirname = os.path.dirname(path )
            hashed_filename = str(hash(filename ) )
            new_filename = filename[: max_length - len(hashed_filename ) - 8] + '''...''' + hashed_filename + '''.lock'''
            return os.path.join(dirname , new_filename )
        else:
            return path
class WindowsFileLock( BaseFileLock ):
    '''simple docstring'''
    def __init__(self , lock_file , timeout=-1 , max_filename_length=None ):
        '''simple docstring'''
        from .file_utils import relative_to_absolute_path
        super().__init__(lock_file , timeout=timeout , max_filename_length=max_filename_length )
        self._lock_file = '''\\\\?\\''' + relative_to_absolute_path(self.lock_file )
    def _acquire(self ):
        '''simple docstring'''
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        try:
            fd = os.open(self._lock_file , open_mode )
        except OSError:
            pass
        else:
            try:
                msvcrt.locking(fd , msvcrt.LK_NBLCK , 1 )
            except OSError:
                os.close(fd )
            else:
                self._lock_file_fd = fd
        return None
    def _release(self ):
        '''simple docstring'''
        fd = self._lock_file_fd
        self._lock_file_fd = None
        msvcrt.locking(fd , msvcrt.LK_UNLCK , 1 )
        os.close(fd )
        try:
            os.remove(self._lock_file )
        # Probably another instance of the application
        # that acquired the file lock.
        except OSError:
            pass
        return None
class UnixFileLock( BaseFileLock ):
    '''simple docstring'''
    def __init__(self , lock_file , timeout=-1 , max_filename_length=None ):
        '''simple docstring'''
        max_filename_length = os.statvfs(os.path.dirname(lock_file ) ).f_namemax
        super().__init__(lock_file , timeout=timeout , max_filename_length=max_filename_length )
    def _acquire(self ):
        '''simple docstring'''
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        fd = os.open(self._lock_file , open_mode )
        try:
            fcntl.flock(fd , fcntl.LOCK_EX | fcntl.LOCK_NB )
        except OSError:
            os.close(fd )
        else:
            self._lock_file_fd = fd
        return None
    def _release(self ):
        '''simple docstring'''
        fd = self._lock_file_fd
        self._lock_file_fd = None
        fcntl.flock(fd , fcntl.LOCK_UN )
        os.close(fd )
        return None
class SoftFileLock( BaseFileLock ):
    '''simple docstring'''
    def _acquire(self ):
        '''simple docstring'''
        open_mode = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
        try:
            fd = os.open(self._lock_file , open_mode )
        except OSError:
            pass
        else:
            self._lock_file_fd = fd
        return None
    def _release(self ):
        '''simple docstring'''
        os.close(self._lock_file_fd )
        self._lock_file_fd = None
        try:
            os.remove(self._lock_file )
        # The file is already deleted and that's what we want.
        except OSError:
            pass
        return None
FileLock = None
if msvcrt:
    FileLock = WindowsFileLock
elif fcntl:
    FileLock = UnixFileLock
else:
    FileLock = SoftFileLock
if warnings is not None:
warnings.warn("""only soft file lock is available""")
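# A brief usage sketch for the classes above; "app.lock" is an illustrative path.
# Entering the context blocks until the lock is acquired or `timeout` expires,
# in which case the `Timeout` defined above is raised.
if __name__ == "__main__":
    lock = FileLock("app.lock", timeout=5)
    with lock:
        pass  # protected critical section; nested `with lock:` blocks are counted
    assert not lock.is_locked  # fully released once the lock counter reaches zero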
"""simple docstring"""
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class ConvNextModelTester :
    '''simple docstring'''
    def __init__(self , parent , batch_size=13 , image_size=32 , num_channels=3 , num_stages=4 , hidden_sizes=[10, 20, 30, 40] , depths=[2, 2, 3, 2] , is_training=True , use_labels=True , intermediate_size=37 , hidden_act="gelu" , num_labels=10 , initializer_range=0.02 , out_features=["stage2", "stage3", "stage4"] , out_indices=[2, 3, 4] , scope=None , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope
    def prepare_config_and_inputs(self ):
        '''simple docstring'''
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self ):
        '''simple docstring'''
        return ConvNextConfig(
            num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=False , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
    def create_and_check_model(self , config , pixel_values , labels ):
        '''simple docstring'''
        model = ConvNextModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
    def create_and_check_for_image_classification(self , config , pixel_values , labels ):
        '''simple docstring'''
        model = ConvNextForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_backbone(self , config , pixel_values , labels ):
        '''simple docstring'''
        model = ConvNextBackbone(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
        self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
        # verify backbone works with out_features=None
        config.out_features = None
        model = ConvNextBackbone(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ) , 1 )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) , 1 )
        self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
    def prepare_config_and_inputs_for_common(self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class _UpperCAmelCase ( ModelTesterMixin, PipelineTesterMixin, unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (
        (
            ConvNextModel,
            ConvNextForImageClassification,
            ConvNextBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {'feature-extraction': ConvNextModel, 'image-classification': ConvNextForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self ):
        '''simple docstring'''
        self.model_tester = ConvNextModelTester(self )
        self.config_tester = ConfigTester(self , config_class=ConvNextConfig , has_text_modality=False , hidden_size=37 )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties(self ):
'''simple docstring'''
return
@unittest.skip(reason='''ConvNext does not use inputs_embeds''' )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
pass
@unittest.skip(reason='''ConvNext does not support input and output embeddings''' )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
pass
@unittest.skip(reason='''ConvNext does not use feedforward chunking''' )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states ) , expected_num_stages + 1 )
            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict['''output_hidden_states'''] = True
            check_hidden_states_output(inputs_dict , config , model_class )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict , config , model_class )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
@slow
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ConvNextModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img():
    """simple docstring"""
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_torch
@require_vision
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
    def default_image_processor(self ):
'''simple docstring'''
return AutoImageProcessor.from_pretrained('''facebook/convnext-tiny-224''' ) if is_vision_available() else None
@slow
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
        model = ConvNextForImageClassification.from_pretrained('''facebook/convnext-tiny-224''' ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='''pt''' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 10_00) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([-0.0260, -0.4739, 0.1911] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 ) )
@require_torch
class _UpperCAmelCase ( unittest.TestCase, BackboneTesterMixin ):
'''simple docstring'''
    all_model_classes = (ConvNextBackbone,) if is_torch_available() else ()
    config_class = ConvNextConfig
    has_attentions = False
    def setUp(self ):
        '''simple docstring'''
        self.model_tester = ConvNextModelTester(self )
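# A hedged sketch of the backbone contract asserted above: with `out_features`
# set, ConvNextBackbone returns one feature map per requested stage. The tiny
# config values mirror the tester defaults; the input size is illustrative.
if __name__ == "__main__":
    sketch_config = ConvNextConfig(
        hidden_sizes=[10, 20, 30, 40], depths=[2, 2, 3, 2], out_features=["stage2", "stage3"]
    )
    sketch_backbone = ConvNextBackbone(sketch_config).eval()
    with torch.no_grad():
        feature_maps = sketch_backbone(torch.randn(1, 3, 32, 32)).feature_maps
    print([tuple(fm.shape) for fm in feature_maps])  # one map per entry in out_features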
"""simple docstring"""
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class LiltModelTester :
    '''simple docstring'''
    def __init__(self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=24 , num_hidden_layers=2 , num_attention_heads=6 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_12 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , scope=None , range_bbox=10_00 , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.range_bbox = range_bbox
    def prepare_config_and_inputs(self ):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        bbox = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox )
        # Ensure that bbox is legal
        for i in range(bbox.shape[0] ):
            for j in range(bbox.shape[1] ):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t
        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
        config = self.get_config()
        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    def get_config(self ):
'''simple docstring'''
return LiltConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
    def create_and_check_model(self , config , input_ids , bbox , token_type_ids , input_mask , sequence_labels , token_labels , ):
        '''simple docstring'''
        model = LiltModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , bbox=bbox , attention_mask=input_mask , token_type_ids=token_type_ids )
        result = model(input_ids , bbox=bbox , token_type_ids=token_type_ids )
        result = model(input_ids , bbox=bbox )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
    def create_and_check_for_token_classification(self , config , input_ids , bbox , token_type_ids , input_mask , sequence_labels , token_labels , ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = LiltForTokenClassification(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , bbox=bbox , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_for_question_answering(self , config , input_ids , bbox , token_type_ids , input_mask , sequence_labels , token_labels , ):
        '''simple docstring'''
        model = LiltForQuestionAnswering(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , bbox=bbox , attention_mask=input_mask , token_type_ids=token_type_ids , start_positions=sequence_labels , end_positions=sequence_labels , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common(self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            '''input_ids''': input_ids,
            '''bbox''': bbox,
            '''token_type_ids''': token_type_ids,
            '''attention_mask''': input_mask,
        }
        return config, inputs_dict
@require_torch
class _UpperCAmelCase ( ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (
(
LiltModel,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltForQuestionAnswering,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
'feature-extraction': LiltModel,
'question-answering': LiltForQuestionAnswering,
'text-classification': LiltForSequenceClassification,
'token-classification': LiltForTokenClassification,
'zero-shot': LiltForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = False
    test_pruning = False
    def is_pipeline_test_to_skip(
        self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ):
        '''simple docstring'''
        return True
    def setUp(self ):
        '''simple docstring'''
        self.model_tester = LiltModelTester(self )
        self.config_tester = ConfigTester(self , config_class=LiltConfig , hidden_size=37 )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs )
@slow
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LiltModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_torch
@slow
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
        model = LiltModel.from_pretrained('''SCUT-DLVCLab/lilt-roberta-en-base''' ).to(torch_device )
        input_ids = torch.tensor([[1, 2]] , device=torch_device )
        bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(input_ids=input_ids , bbox=bbox )
        expected_shape = torch.Size([1, 2, 7_68] )
        expected_slice = torch.tensor(
            [[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]] , device=torch_device , )
        self.assertEqual(outputs.last_hidden_state.shape , expected_shape )
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , expected_slice , atol=1E-3 ) )
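# A note on the input contract exercised above: LiLT expects one (x0, y0, x1, y1)
# box per token with x0 <= x1 and y0 <= y1 -- exactly the invariant that the
# tester's bbox-fixing loop enforces. A tiny sanity-check sketch:
if __name__ == "__main__":
    bbox_sketch = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]])  # one box per token
    assert bool((bbox_sketch[..., 2] >= bbox_sketch[..., 0]).all() )
    assert bool((bbox_sketch[..., 3] >= bbox_sketch[..., 1]).all() )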
"""simple docstring"""
def solution(pence: int = 200 ) ->int:
    """simple docstring"""
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # base case: 1 way to make 0 pence
    for coin in coins:
        for i in range(coin , pence + 1 , 1 ):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]
if __name__ == "__main__":
assert solution(200) == 7_3682
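    # A small worked check of the recurrence above: 5 pence can be formed as
    # {5}, {2, 2, 1}, {2, 1, 1, 1} and {1, 1, 1, 1, 1}, i.e. 4 ways.
    assert solution(5) == 4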
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class DistilBertModelTester( object ):
    '''simple docstring'''
    def __init__(self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=False , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_12 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self ):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self ):
'''simple docstring'''
return DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
    def create_and_check_distilbert_model(self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        model = DistilBertModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , input_mask )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_distilbert_for_masked_lm(self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        model = DistilBertForMaskedLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_distilbert_for_question_answering(self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        model = DistilBertForQuestionAnswering(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , start_positions=sequence_labels , end_positions=sequence_labels )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def create_and_check_distilbert_for_sequence_classification(self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = DistilBertForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , labels=sequence_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_distilbert_for_token_classification(self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = DistilBertForTokenClassification(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_distilbert_for_multiple_choice(self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        config.num_choices = self.num_choices
        model = DistilBertForMultipleChoice(config=config )
        model.to(torch_device )
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        result = model(
            multiple_choice_inputs_ids , attention_mask=multiple_choice_input_mask , labels=choice_labels , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common(self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_torch
class _UpperCAmelCase ( ModelTesterMixin, PipelineTesterMixin, unittest.TestCase ):
    '''simple docstring'''
    all_model_classes = (
        (
            DistilBertModel,
            DistilBertForMaskedLM,
            DistilBertForMultipleChoice,
            DistilBertForQuestionAnswering,
            DistilBertForSequenceClassification,
            DistilBertForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            'feature-extraction': DistilBertModel,
            'fill-mask': DistilBertForMaskedLM,
            'question-answering': DistilBertForQuestionAnswering,
            'text-classification': DistilBertForSequenceClassification,
            'token-classification': DistilBertForTokenClassification,
            'zero-shot': DistilBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_pruning = True
    test_resize_embeddings = True
    test_resize_position_embeddings = True
    def setUp(self ):
        '''simple docstring'''
        self.model_tester = DistilBertModelTester(self )
        self.config_tester = ConfigTester(self , config_class=DistilBertConfig , dim=37 )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs )
@slow
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DistilBertModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@slow
@require_torch_gpu
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # BertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == DistilBertForMultipleChoice:
                return
            config.torchscript = True
            model = model_class(config=config )
            inputs_dict = self._prepare_for_class(inputs_dict , model_class )
            traced_model = torch.jit.trace(
                model , (inputs_dict['''input_ids'''].to('''cpu''' ), inputs_dict['''attention_mask'''].to('''cpu''' )) )
            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model , os.path.join(tmp , '''traced_model.pt''' ) )
                loaded = torch.jit.load(os.path.join(tmp , '''traced_model.pt''' ) , map_location=torch_device )
                loaded(inputs_dict['''input_ids'''].to(torch_device ) , inputs_dict['''attention_mask'''].to(torch_device ) )
@require_torch
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
        model = DistilBertModel.from_pretrained('''distilbert-base-uncased''' )
        input_ids = torch.tensor([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]] )
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
        with torch.no_grad():
            output = model(input_ids , attention_mask=attention_mask )[0]
        expected_shape = torch.Size((1, 11, 7_68) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]] )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , expected_slice , atol=1E-4 ) )
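# A hedged end-to-end sketch with the same checkpoint the integration test loads
# ("distilbert-base-uncased"), using the high-level pipeline API instead of the
# raw model call; it needs network access on first run.
if __name__ == "__main__":
    from transformers import pipeline

    unmasker = pipeline("fill-mask", model="distilbert-base-uncased")
    print(unmasker("Paris is the [MASK] of France.")[0]["token_str"])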
"""simple docstring"""
import inspect
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = """src/transformers"""
# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)
CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(r"""\[(.+?)\]\((https://huggingface\.co/.+?)\)""")
CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
"""DecisionTransformerConfig""",
"""EncoderDecoderConfig""",
"""MusicgenConfig""",
"""RagConfig""",
"""SpeechEncoderDecoderConfig""",
"""TimmBackboneConfig""",
"""VisionEncoderDecoderConfig""",
"""VisionTextDualEncoderConfig""",
"""LlamaConfig""",
}
def get_checkpoint_from_config_class(config_class ):
    """simple docstring"""
    checkpoint = None
    # source code of `config_class`
    config_source = inspect.getsource(config_class )
    checkpoints = _re_checkpoint.findall(config_source )
    # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
    # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
    for ckpt_name, ckpt_link in checkpoints:
        # allow the link to end with `/`
        if ckpt_link.endswith('''/''' ):
            ckpt_link = ckpt_link[:-1]
        # verify the checkpoint name corresponds to the checkpoint link
        ckpt_link_from_name = f"""https://huggingface.co/{ckpt_name}"""
        if ckpt_link == ckpt_link_from_name:
            checkpoint = ckpt_name
            break
    return checkpoint
def check_config_docstrings_have_checkpoints():
    """simple docstring"""
    configs_without_checkpoint = []
    for config_class in list(CONFIG_MAPPING.values() ):
        # Skip deprecated models
        if "models.deprecated" in config_class.__module__:
            continue
        checkpoint = get_checkpoint_from_config_class(config_class )
        name = config_class.__name__
        if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name )
    if len(configs_without_checkpoint ) > 0:
        message = '''\n'''.join(sorted(configs_without_checkpoint ) )
        raise ValueError(f"""The following configurations don't contain any valid checkpoint:\n{message}""" )
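# A quick demonstration of the docstring regex above on a synthetic snippet;
# the docstring text is made up purely for illustration.
def _demo_checkpoint_regex():
    sample = "See [bert-base-uncased](https://huggingface.co/bert-base-uncased) for details."
    # prints: [('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')]
    print(_re_checkpoint.findall(sample))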
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
"""simple docstring"""
import argparse
import tensorflow as tf
import torch
from transformers import BertConfig, BertForMaskedLM
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertPooler,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_checkpoint_to_pytorch(tf_checkpoint_path: str, config_path: str, pytorch_dump_path: str):
    """simple docstring"""

    def get_masked_lm_array(name: str):
        full_name = f"masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)
        if "kernel" in name:
            array = array.transpose()
        return torch.from_numpy(array)

    def get_encoder_array(name: str):
        full_name = f"encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)
        if "kernel" in name:
            array = array.transpose()
        return torch.from_numpy(array)

    def get_encoder_layer_array(layer_index: int, name: str):
        full_name = f"encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)
        if "kernel" in name:
            array = array.transpose()
        return torch.from_numpy(array)

    def get_encoder_attention_layer_array(layer_index: int, name: str, original_shape):
        full_name = f"encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)
        array = array.reshape(original_shape)
        if "kernel" in name:
            array = array.transpose()
        return torch.from_numpy(array)

    print(f"Loading model based on config from {config_path}...")
    config = BertConfig.from_json_file(config_path)
    model = BertForMaskedLM(config)

    # Layers
    for layer_index in range(0, config.num_hidden_layers):
        layer: BertLayer = model.bert.encoder.layer[layer_index]

        # Self-attention
        self_attn: BertSelfAttention = layer.attention.self

        self_attn.query.weight.data = get_encoder_attention_layer_array(
            layer_index, "_query_dense/kernel", self_attn.query.weight.data.shape
        )
        self_attn.query.bias.data = get_encoder_attention_layer_array(
            layer_index, "_query_dense/bias", self_attn.query.bias.data.shape
        )
        self_attn.key.weight.data = get_encoder_attention_layer_array(
            layer_index, "_key_dense/kernel", self_attn.key.weight.data.shape
        )
        self_attn.key.bias.data = get_encoder_attention_layer_array(
            layer_index, "_key_dense/bias", self_attn.key.bias.data.shape
        )
        self_attn.value.weight.data = get_encoder_attention_layer_array(
            layer_index, "_value_dense/kernel", self_attn.value.weight.data.shape
        )
        self_attn.value.bias.data = get_encoder_attention_layer_array(
            layer_index, "_value_dense/bias", self_attn.value.bias.data.shape
        )

        # Self-attention Output
        self_output: BertSelfOutput = layer.attention.output

        self_output.dense.weight.data = get_encoder_attention_layer_array(
            layer_index, "_output_dense/kernel", self_output.dense.weight.data.shape
        )
        self_output.dense.bias.data = get_encoder_attention_layer_array(
            layer_index, "_output_dense/bias", self_output.dense.bias.data.shape
        )
        self_output.LayerNorm.weight.data = get_encoder_layer_array(layer_index, "_attention_layer_norm/gamma")
        self_output.LayerNorm.bias.data = get_encoder_layer_array(layer_index, "_attention_layer_norm/beta")

        # Intermediate
        intermediate: BertIntermediate = layer.intermediate

        intermediate.dense.weight.data = get_encoder_layer_array(layer_index, "_intermediate_dense/kernel")
        intermediate.dense.bias.data = get_encoder_layer_array(layer_index, "_intermediate_dense/bias")

        # Output
        bert_output: BertOutput = layer.output

        bert_output.dense.weight.data = get_encoder_layer_array(layer_index, "_output_dense/kernel")
        bert_output.dense.bias.data = get_encoder_layer_array(layer_index, "_output_dense/bias")
        bert_output.LayerNorm.weight.data = get_encoder_layer_array(layer_index, "_output_layer_norm/gamma")
        bert_output.LayerNorm.bias.data = get_encoder_layer_array(layer_index, "_output_layer_norm/beta")

    # Embeddings
    model.bert.embeddings.position_embeddings.weight.data = get_encoder_array("_position_embedding_layer/embeddings")
    model.bert.embeddings.token_type_embeddings.weight.data = get_encoder_array("_type_embedding_layer/embeddings")
    model.bert.embeddings.LayerNorm.weight.data = get_encoder_array("_embedding_norm_layer/gamma")
    model.bert.embeddings.LayerNorm.bias.data = get_encoder_array("_embedding_norm_layer/beta")

    # LM Head
    lm_head = model.cls.predictions.transform

    lm_head.dense.weight.data = get_masked_lm_array("dense/kernel")
    lm_head.dense.bias.data = get_masked_lm_array("dense/bias")
    lm_head.LayerNorm.weight.data = get_masked_lm_array("layer_norm/gamma")
    lm_head.LayerNorm.bias.data = get_masked_lm_array("layer_norm/beta")
    model.bert.embeddings.word_embeddings.weight.data = get_masked_lm_array("embedding_table")

    # Pooling
    model.bert.pooler = BertPooler(config=config)
    model.bert.pooler.dense.weight.data = get_encoder_array("_pooler_layer/kernel")
    model.bert.pooler.dense.bias.data = get_encoder_array("_pooler_layer/bias")

    # Export final model
    model.save_pretrained(pytorch_dump_path)

    # Integration test - should load without any errors ;)
    new_model = BertForMaskedLM.from_pretrained(pytorch_dump_path)
    print(new_model.eval())

    print("Model conversion was done successfully!")
if __name__ == "__main__":
SCREAMING_SNAKE_CASE : int = argparse.ArgumentParser()
parser.add_argument(
"""--tf_checkpoint_path""", type=str, required=True, help="""Path to the TensorFlow Token Dropping checkpoint path."""
)
parser.add_argument(
"""--bert_config_file""",
type=str,
required=True,
help="""The config json file corresponding to the BERT model. This specifies the model architecture.""",
)
parser.add_argument(
"""--pytorch_dump_path""",
type=str,
required=True,
help="""Path to the output PyTorch model.""",
)
SCREAMING_SNAKE_CASE : Optional[int] = parser.parse_args()
convert_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
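
# Example invocation (hypothetical script and paths, shown purely for illustration):
#
#   python convert_bert_token_dropping_original_tf2_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./token_dropping_tf_ckpt \
#       --bert_config_file ./bert_config.json \
#       --pytorch_dump_path ./converted_model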
"""simple docstring"""
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
    MobileViTImageProcessor,
    MobileViTV2Config,
    MobileViTV2ForImageClassification,
    MobileViTV2ForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def load_orig_config_file(config_path):
    """simple docstring"""
    print("Loading config file...")

    def flatten_yaml_as_dict(d, parent_key="", sep="."):
        items = []
        for k, v in d.items():
            new_key = parent_key + sep + k if parent_key else k
            if isinstance(v, collections.abc.MutableMapping):
                items.extend(flatten_yaml_as_dict(v, new_key, sep=sep).items())
            else:
                items.append((new_key, v))
        return dict(items)

    config = argparse.Namespace()
    with open(config_path, "r") as yaml_file:
        try:
            cfg = yaml.load(yaml_file, Loader=yaml.FullLoader)

            flat_cfg = flatten_yaml_as_dict(cfg)
            for k, v in flat_cfg.items():
                setattr(config, k, v)
        except yaml.YAMLError as exc:
            logger.error("Error while loading config file: {}. Error message: {}".format(config_path, str(exc)))
    return config


def get_mobilevitv2_config(task_name, orig_cfg_file):
    """simple docstring"""
    config = MobileViTV2Config()

    is_segmentation_model = False

    # dataset
    if task_name.startswith("imagenet1k_"):
        config.num_labels = 1000
        if int(task_name.strip().split("_")[-1]) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = "imagenet-1k-id2label.json"
    elif task_name.startswith("imagenet21k_to_1k_"):
        config.num_labels = 21000
        if int(task_name.strip().split("_")[-1]) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = "imagenet-22k-id2label.json"
    elif task_name.startswith("ade20k_"):
        config.num_labels = 151
        config.image_size = 512
        filename = "ade20k-id2label.json"
        is_segmentation_model = True
    elif task_name.startswith("voc_"):
        config.num_labels = 21
        config.image_size = 512
        filename = "pascal-voc-id2label.json"
        is_segmentation_model = True

    # orig_config
    orig_config = load_orig_config_file(orig_cfg_file)
    assert getattr(orig_config, "model.classification.name", -1) == "mobilevit_v2", "Invalid model"
    config.width_multiplier = getattr(orig_config, "model.classification.mitv2.width_multiplier", 1.0)
    assert (
        getattr(orig_config, "model.classification.mitv2.attn_norm_layer", -1) == "layer_norm_2d"
    ), "Norm layers other than layer_norm_2d is not supported"
    config.hidden_act = getattr(orig_config, "model.classification.activation.name", "swish")
    # config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)

    if is_segmentation_model:
        config.output_stride = getattr(orig_config, "model.segmentation.output_stride", 16)
        if "_deeplabv3" in task_name:
            config.atrous_rates = getattr(orig_config, "model.segmentation.deeplabv3.aspp_rates", [12, 24, 36])
            config.aspp_out_channels = getattr(orig_config, "model.segmentation.deeplabv3.aspp_out_channels", 512)
            config.aspp_dropout_prob = getattr(orig_config, "model.segmentation.deeplabv3.aspp_dropout", 0.1)

    # id2label
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config


def rename_key(dct, old, new):
    """simple docstring"""
    val = dct.pop(old)
    dct[new] = val
def create_rename_keys(state_dict, base_model=False):
    """simple docstring"""
    if base_model:
        model_prefix = ""
    else:
        model_prefix = "mobilevitv2."

    rename_keys = []
    for k in state_dict.keys():
        if k[:8] == "encoder.":
            k_new = k[8:]
        else:
            k_new = k

        if ".block." in k:
            k_new = k_new.replace(".block.", ".")
        if ".conv." in k:
            k_new = k_new.replace(".conv.", ".convolution.")
        if ".norm." in k:
            k_new = k_new.replace(".norm.", ".normalization.")

        if "conv_1." in k:
            k_new = k_new.replace("conv_1.", f"{model_prefix}conv_stem.")
        for i in [1, 2]:
            if f"layer_{i}." in k:
                k_new = k_new.replace(f"layer_{i}.", f"{model_prefix}encoder.layer.{i-1}.layer.")
        if ".exp_1x1." in k:
            k_new = k_new.replace(".exp_1x1.", ".expand_1x1.")
        if ".red_1x1." in k:
            k_new = k_new.replace(".red_1x1.", ".reduce_1x1.")

        for i in [3, 4, 5]:
            if f"layer_{i}.0." in k:
                k_new = k_new.replace(f"layer_{i}.0.", f"{model_prefix}encoder.layer.{i-1}.downsampling_layer.")
            if f"layer_{i}.1.local_rep.0." in k:
                k_new = k_new.replace(f"layer_{i}.1.local_rep.0.", f"{model_prefix}encoder.layer.{i-1}.conv_kxk.")
            if f"layer_{i}.1.local_rep.1." in k:
                k_new = k_new.replace(f"layer_{i}.1.local_rep.1.", f"{model_prefix}encoder.layer.{i-1}.conv_1x1.")

        for i in [3, 4, 5]:
            if i == 3:
                j_in = [0, 1]
            elif i == 4:
                j_in = [0, 1, 2, 3]
            elif i == 5:
                j_in = [0, 1, 2]

            for j in j_in:
                if f"layer_{i}.1.global_rep.{j}." in k:
                    k_new = k_new.replace(
                        f"layer_{i}.1.global_rep.{j}.", f"{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}."
                    )
            if f"layer_{i}.1.global_rep.{j+1}." in k:
                k_new = k_new.replace(
                    f"layer_{i}.1.global_rep.{j+1}.", f"{model_prefix}encoder.layer.{i-1}.layernorm."
                )

            if f"layer_{i}.1.conv_proj." in k:
                k_new = k_new.replace(f"layer_{i}.1.conv_proj.", f"{model_prefix}encoder.layer.{i-1}.conv_projection.")

        if "pre_norm_attn.0." in k:
            k_new = k_new.replace("pre_norm_attn.0.", "layernorm_before.")
        if "pre_norm_attn.1." in k:
            k_new = k_new.replace("pre_norm_attn.1.", "attention.")
        if "pre_norm_ffn.0." in k:
            k_new = k_new.replace("pre_norm_ffn.0.", "layernorm_after.")
        if "pre_norm_ffn.1." in k:
            k_new = k_new.replace("pre_norm_ffn.1.", "ffn.conv1.")
        if "pre_norm_ffn.3." in k:
            k_new = k_new.replace("pre_norm_ffn.3.", "ffn.conv2.")

        if "classifier.1." in k:
            k_new = k_new.replace("classifier.1.", "classifier.")

        if "seg_head." in k:
            k_new = k_new.replace("seg_head.", "segmentation_head.")
        if ".aspp_layer." in k:
            k_new = k_new.replace(".aspp_layer.", ".")
        if ".aspp_pool." in k:
            k_new = k_new.replace(".aspp_pool.", ".")

        rename_keys.append((k, k_new))
    return rename_keys


def remove_unused_keys(state_dict):
    """simple docstring"""
    keys_to_ignore = []
    for k in state_dict.keys():
        if k.startswith("seg_head.aux_head."):
            keys_to_ignore.append(k)
    for k in keys_to_ignore:
        state_dict.pop(k, None)


def prepare_img():
    """simple docstring"""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_mobilevitv2_checkpoint(task_name, checkpoint_path, orig_config_path, pytorch_dump_folder_path):
    """simple docstring"""
    config = get_mobilevitv2_config(task_name, orig_config_path)

    # load original state_dict
    checkpoint = torch.load(checkpoint_path, map_location="cpu")

    # load huggingface model
    if task_name.startswith("ade20k_") or task_name.startswith("voc_"):
        model = MobileViTV2ForSemanticSegmentation(config).eval()
        base_model = False
    else:
        model = MobileViTV2ForImageClassification(config).eval()
        base_model = False

    # remove and rename some keys of the original model
    state_dict = checkpoint
    remove_unused_keys(state_dict)
    rename_keys = create_rename_keys(state_dict, base_model=base_model)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)

    # load modified state_dict
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)

    # verify classification model
    if task_name.startswith("imagenet"):
        logits = outputs.logits
        predicted_class_idx = logits.argmax(-1).item()
        print("Predicted class:", model.config.id2label[predicted_class_idx])
        if task_name.startswith("imagenet1k_256") and config.width_multiplier == 1.0:
            # expected_logits for base variant
            expected_logits = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01])
            assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {task_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
SCREAMING_SNAKE_CASE : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--task""",
default="""imagenet1k_256""",
type=str,
help=(
"""Name of the task for which the MobileViTV2 model you'd like to convert is trained on . """
"""
Classification (ImageNet-1k)
- MobileViTV2 (256x256) : imagenet1k_256
- MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384
- MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :
imagenet21k_to_1k_256
- MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on
ImageNet-1k 384x384) : imagenet21k_to_1k_384
Segmentation
- ADE20K Dataset : ade20k_deeplabv3
- Pascal VOC 2012 Dataset: voc_deeplabv3
"""
),
choices=[
"""imagenet1k_256""",
"""imagenet1k_384""",
"""imagenet21k_to_1k_256""",
"""imagenet21k_to_1k_384""",
"""ade20k_deeplabv3""",
"""voc_deeplabv3""",
],
)
parser.add_argument(
"""--orig_checkpoint_path""", required=True, type=str, help="""Path to the original state dict (.pt file)."""
)
parser.add_argument("""--orig_config_path""", required=True, type=str, help="""Path to the original config file.""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, type=str, help="""Path to the output PyTorch model directory."""
)
SCREAMING_SNAKE_CASE : Any = parser.parse_args()
convert_mobilevitva_checkpoint(
args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
)
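
# Example invocation (hypothetical script and paths, shown purely for illustration):
#
#   python convert_mobilevitv2_to_pytorch.py \
#       --task imagenet1k_256 \
#       --orig_checkpoint_path ./mobilevitv2-1.0.pt \
#       --orig_config_path ./mobilevitv2.yaml \
#       --pytorch_dump_folder_path ./mobilevitv2-1.0-imagenet1k-256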
"""simple docstring"""
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
    import sqlite3

    import sqlalchemy


class SqlDatasetReader(AbstractDatasetInputStream):
    """simple docstring"""

    def __init__(self, sql, con, features=None, cache_dir=None, keep_in_memory=False, **kwargs):
        super().__init__(features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs)
        self.builder = Sql(cache_dir=cache_dir, features=features, sql=sql, con=con, **kwargs)

    def read(self):
        download_config = None
        download_mode = None
        verification_mode = None
        base_path = None

        self.builder.download_and_prepare(
            download_config=download_config,
            download_mode=download_mode,
            verification_mode=verification_mode,
            base_path=base_path,
        )

        # Build dataset for splits
        dataset = self.builder.as_dataset(split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory)
        return dataset


class SqlDatasetWriter:
    """simple docstring"""

    def __init__(self, dataset, name, con, batch_size=None, num_proc=None, **to_sql_kwargs):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"num_proc {num_proc} must be an integer > 0.")

        self.dataset = dataset
        self.name = name
        self.con = con
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.to_sql_kwargs = to_sql_kwargs

    def write(self):
        _ = self.to_sql_kwargs.pop("sql", None)
        _ = self.to_sql_kwargs.pop("con", None)
        index = self.to_sql_kwargs.pop("index", False)

        written = self._write(index=index, **self.to_sql_kwargs)
        return written

    def _batch_sql(self, args):
        offset, index, to_sql_kwargs = args
        to_sql_kwargs = {**to_sql_kwargs, "if_exists": "append"} if offset > 0 else to_sql_kwargs
        batch = query_table(
            table=self.dataset.data,
            key=slice(offset, offset + self.batch_size),
            indices=self.dataset._indices,
        )
        df = batch.to_pandas()
        num_rows = df.to_sql(self.name, self.con, index=index, **to_sql_kwargs)
        return num_rows or len(df)

    def _write(self, index, **to_sql_kwargs):
        written = 0

        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size),
                unit="ba",
                disable=not logging.is_progress_bar_enabled(),
                desc="Creating SQL from Arrow format",
            ):
                written += self._batch_sql((offset, index, to_sql_kwargs))
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for num_rows_written in logging.tqdm(
                    pool.imap(
                        self._batch_sql,
                        [(offset, index, to_sql_kwargs) for offset in range(0, num_rows, batch_size)],
                    ),
                    total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
                    unit="ba",
                    disable=not logging.is_progress_bar_enabled(),
                    desc="Creating SQL from Arrow format",
                ):
                    written += num_rows_written

        return written
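
# A minimal usage sketch (assumptions: an in-memory sqlite3 connection and a toy
# Dataset; illustrative only, not part of the module):
#
#     import sqlite3
#     from datasets import Dataset
#
#     ds = Dataset.from_dict({"id": [1, 2], "text": ["foo", "bar"]})
#     con = sqlite3.connect(":memory:")
#     SqlDatasetWriter(ds, "my_table", con, batch_size=2).write()  # returns the number of rows written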
"""simple docstring"""
from __future__ import annotations
def make_matrix(row_size: int = 4) -> list[list[int]]:
    """simple docstring"""
    row_size = abs(row_size) or 4
    return [[1 + x + y * row_size for x in range(row_size)] for y in range(row_size)]


def rotate_90(matrix: list[list[int]]) -> list[list[int]]:
    """simple docstring"""
    return reverse_row(transpose(matrix))
    # OR.. transpose(reverse_column(matrix))


def rotate_180(matrix: list[list[int]]) -> list[list[int]]:
    """simple docstring"""
    return reverse_row(reverse_column(matrix))
    # OR.. reverse_column(reverse_row(matrix))


def rotate_270(matrix: list[list[int]]) -> list[list[int]]:
    """simple docstring"""
    return reverse_column(transpose(matrix))
    # OR.. transpose(reverse_row(matrix))


def transpose(matrix: list[list[int]]) -> list[list[int]]:
    """simple docstring"""
    matrix = [list(x) for x in zip(*matrix)]
    return matrix


def reverse_row(matrix: list[list[int]]) -> list[list[int]]:
    """simple docstring"""
    matrix = matrix[::-1]
    return matrix


def reverse_column(matrix: list[list[int]]) -> list[list[int]]:
    """simple docstring"""
    matrix = [x[::-1] for x in matrix]
    return matrix


def print_matrix(matrix: list[list[int]]) -> None:
    """simple docstring"""
    for row in matrix:
        print(*row)


if __name__ == "__main__":
    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 90 counterclockwise:\n")
    print_matrix(rotate_90(matrix))

    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 180:\n")
    print_matrix(rotate_180(matrix))

    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 270 counterclockwise:\n")
    print_matrix(rotate_270(matrix))
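
# A quick sanity sketch (relying on the fact that four quarter turns are the
# identity), runnable alongside the functions above:
def _demo_rotation_identity() -> None:
    original = make_matrix(3)
    rotated = original
    for _ in range(4):
        rotated = rotate_90(rotated)
    # four counterclockwise quarter turns return the original matrix
    assert rotated == original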
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json",
}
class LxmertConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "lxmert"
    attribute_map = {}

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_attention_heads=12,
        num_qa_labels=9500,
        num_object_labels=1600,
        num_attr_labels=400,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        l_layers=9,
        x_layers=5,
        r_layers=5,
        visual_feat_dim=2048,
        visual_pos_dim=4,
        visual_loss_normalizer=6.67,
        task_matched=True,
        task_mask_lm=True,
        task_obj_predict=True,
        task_qa=True,
        visual_obj_loss=True,
        visual_attr_loss=True,
        visual_feat_loss=True,
        **kwargs,
    ):
        """simple docstring"""
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.num_qa_labels = num_qa_labels
        self.num_object_labels = num_object_labels
        self.num_attr_labels = num_attr_labels
        self.l_layers = l_layers
        self.x_layers = x_layers
        self.r_layers = r_layers
        self.visual_feat_dim = visual_feat_dim
        self.visual_pos_dim = visual_pos_dim
        self.visual_loss_normalizer = visual_loss_normalizer
        self.task_matched = task_matched
        self.task_mask_lm = task_mask_lm
        self.task_obj_predict = task_obj_predict
        self.task_qa = task_qa
        self.visual_obj_loss = visual_obj_loss
        self.visual_attr_loss = visual_attr_loss
        self.visual_feat_loss = visual_feat_loss
        self.num_hidden_layers = {"vision": r_layers, "cross_encoder": x_layers, "language": l_layers}
        super().__init__(**kwargs)
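
# A minimal usage sketch (defaults only; nothing is downloaded):
#
#     config = LxmertConfig()
#     config.num_hidden_layers  # -> {'vision': 5, 'cross_encoder': 5, 'language': 9}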
"""simple docstring"""
from __future__ import annotations
from random import random
class Node:
    """simple docstring"""

    def __init__(self, value: int | None = None):
        self.value = value
        self.prior = random()
        self.left: Node | None = None
        self.right: Node | None = None

    def __repr__(self):
        from pprint import pformat

        if self.left is None and self.right is None:
            return f"'{self.value}: {self.prior:.5}'"
        else:
            return pformat({f"{self.value}: {self.prior:.5}": (self.left, self.right)}, indent=1)

    def __str__(self):
        value = str(self.value) + " "
        left = str(self.left or "")
        right = str(self.right or "")
        return value + left + right


def split(root: Node | None, value: int) -> tuple[Node | None, Node | None]:
    """simple docstring"""
    if root is None:  # None tree is split into 2 Nones
        return None, None
    elif root.value is None:
        return None, None
    else:
        if value < root.value:
            left, root.left = split(root.left, value)
            return left, root
        else:
            root.right, right = split(root.right, value)
            return root, right


def merge(left: Node | None, right: Node | None) -> Node | None:
    """simple docstring"""
    if (not left) or (not right):  # If one node is None, return the other
        return left or right
    elif left.prior < right.prior:
        left.right = merge(left.right, right)
        return left
    else:
        right.left = merge(left, right.left)
        return right


def insert(root: Node | None, value: int) -> Node | None:
    """simple docstring"""
    node = Node(value)
    left, right = split(root, value)
    return merge(merge(left, node), right)


def erase(root: Node | None, value: int) -> Node | None:
    """simple docstring"""
    left, right = split(root, value - 1)
    _, right = split(right, value)
    return merge(left, right)


def inorder(root: Node | None) -> None:
    """simple docstring"""
    if not root:  # None
        return
    else:
        inorder(root.left)
        print(root.value, end=",")
        inorder(root.right)


def interact_treap(root: Node | None, args: str) -> Node | None:
    """simple docstring"""
    for arg in args.split():
        if arg[0] == "+":
            root = insert(root, int(arg[1:]))
        elif arg[0] == "-":
            root = erase(root, int(arg[1:]))
        else:
            print("Unknown command")
    return root


def main() -> None:
    """simple docstring"""
    root = None
    print(
        "enter numbers to create a tree, + value to add value into treap, "
        "- value to erase all nodes with value. 'q' to quit. "
    )

    args = input()
    while args != "q":
        root = interact_treap(root, args)
        print(root)
        args = input()

    print("goodbye!")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
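
# A small programmatic sketch of the same API the interactive loop drives
# (assumed values; `inorder` prints a comma-separated traversal):
def _demo_treap() -> None:
    root: Node | None = None
    for value in (5, 3, 8, 1):
        root = insert(root, value)
    root = erase(root, 3)
    inorder(root)  # prints: 1,5,8,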
"""simple docstring"""
def find_min(arr: list[int]) -> int:
    """simple docstring"""
    n = len(arr)
    s = sum(arr)

    dp = [[False for x in range(s + 1)] for y in range(n + 1)]

    for i in range(1, n + 1):
        dp[i][0] = True

    for i in range(1, s + 1):
        dp[0][i] = False

    for i in range(1, n + 1):
        for j in range(1, s + 1):
            dp[i][j] = dp[i - 1][j]

            if arr[i - 1] <= j:
                dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]]

    for j in range(int(s / 2), -1, -1):
        if dp[n][j] is True:
            diff = s - 2 * j
            break

    return diff
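
# Usage sketch (assumed input): for [1, 6, 11, 5] the two closest subset sums
# are 11 and 12, so the minimum partition difference is 1.
if __name__ == "__main__":
    assert find_min([1, 6, 11, 5]) == 1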
"""simple docstring"""
def solution(n: int = 100) -> int:
    """simple docstring"""
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares)


if __name__ == "__main__":
    print(f"{solution() = }")
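
# Worked example (Project Euler problem 6): for n = 10 the sum of squares is 385
# and the square of the sum is 55**2 = 3025, so the difference is 2640.
assert solution(10) == 3025 - 385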
"""simple docstring"""
from collections.abc import Callable
def bisection(function: Callable[[float], float], a: float, b: float) -> float:
    """simple docstring"""
    start: float = a
    end: float = b
    if function(a) == 0:  # one of the a or b is a root for the function
        return a
    elif function(b) == 0:
        return b
    elif (
        function(a) * function(b) > 0
    ):  # if none of these are root and they are both positive or negative,
        # then this algorithm can't find the root
        raise ValueError("could not find root in given interval.")
    else:
        mid: float = start + (end - start) / 2.0
        while abs(start - mid) > 10**-7:  # until the interval is precisely within 10^-7
            if function(mid) == 0:
                return mid
            elif function(mid) * function(start) < 0:
                end = mid
            else:
                start = mid
            mid = start + (end - start) / 2.0
        return mid


def f(x: float) -> float:
    """simple docstring"""
    return x**3 - 2 * x - 5


if __name__ == "__main__":
    print(bisection(f, 1, 1000))

    import doctest

    doctest.testmod()
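
# Another usage sketch (assumed target function): the positive root of
# x**2 - 4 on [0, 10] is 2, recovered here to the method's tolerance.
assert abs(bisection(lambda x: x**2 - 4, 0, 10) - 2) < 1e-6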
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
"""configuration_swiftformer""": [
"""SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""SwiftFormerConfig""",
"""SwiftFormerOnnxConfig""",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swiftformer"] = [
"""SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SwiftFormerForImageClassification""",
"""SwiftFormerModel""",
"""SwiftFormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_luke""": ["""LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LukeConfig"""],
"""tokenization_luke""": ["""LukeTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_luke"] = [
"""LUKE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""LukeForEntityClassification""",
"""LukeForEntityPairClassification""",
"""LukeForEntitySpanClassification""",
"""LukeForMultipleChoice""",
"""LukeForQuestionAnswering""",
"""LukeForSequenceClassification""",
"""LukeForTokenClassification""",
"""LukeForMaskedLM""",
"""LukeModel""",
"""LukePreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig
from .tokenization_luke import LukeTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_luke import (
LUKE_PRETRAINED_MODEL_ARCHIVE_LIST,
LukeForEntityClassification,
LukeForEntityPairClassification,
LukeForEntitySpanClassification,
LukeForMaskedLM,
LukeForMultipleChoice,
LukeForQuestionAnswering,
LukeForSequenceClassification,
LukeForTokenClassification,
LukeModel,
LukePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class FlaxRobertaPreLayerNormModelTester(unittest.TestCase):
    """simple docstring"""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RobertaPreLayerNormConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
# Copied from tests.models.roberta.test_modelling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class FlaxRobertaPreLayerNormModelTest(FlaxModelTesterMixin, unittest.TestCase):
    """simple docstring"""

    test_head_masking = True
    all_model_classes = (
(
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp(self):
        """simple docstring"""
        self.model_tester = FlaxRobertaPreLayerNormModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        """simple docstring"""
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxRobertaPreLayerNormModelIntegrationTest(unittest.TestCase):
    """simple docstring"""

    @slow
    def test_inference_masked_lm(self):
        """simple docstring"""
        model = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        expected_shape = [1, 11, 50265]
        self.assertEqual(list(output.shape), expected_shape)
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[40.4880, 18.0199, -5.2367], [-1.8877, -4.0885, 10.7085], [-2.2613, -5.6110, 7.2665]]], dtype=np.float32
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_no_head(self):
        """simple docstring"""
        model = FlaxRobertaPreLayerNormModel.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[0.0208, -0.0356, 0.0237], [-0.1569, -0.0411, -0.2626], [0.1879, 0.0125, -0.0089]]], dtype=np.float32
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class AltCLIPProcessor(ProcessorMixin):
    """simple docstring"""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("XLMRobertaTokenizer", "XLMRobertaTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        """simple docstring"""
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        """simple docstring"""
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """simple docstring"""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """simple docstring"""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        """simple docstring"""
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
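
# A minimal usage sketch (illustrative only; `processor` would normally come
# from `from_pretrained(...)` with a checkpoint shipping this processor class,
# and `img` is any PIL image):
#
#     out = processor(text=["a photo of a cat"], images=img, return_tensors="pt")
#     sorted(out.keys())  # ['attention_mask', 'input_ids', 'pixel_values']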
"""simple docstring"""
import gc
import unittest
import torch
from parameterized import parameterized
from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class AutoencoderKLTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    """simple docstring"""

    model_class = AutoencoderKL
    main_input_name = "sample"
    base_precision = 1e-2

    @property
    def dummy_input(self):
        """simple docstring"""
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)

        return {"sample": image}

    @property
    def input_shape(self):
        """simple docstring"""
        return (3, 32, 32)

    @property
    def output_shape(self):
        """simple docstring"""
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        """simple docstring"""
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_forward_signature(self):
        """simple docstring"""
        pass

    def test_training(self):
        """simple docstring"""
        pass
    @unittest.skipIf(torch_device == "mps", "Gradient checkpointing skipped on MPS")
    def test_gradient_checkpointing(self):
        """simple docstring"""
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.model_class(**init_dict)
        model.to(torch_device)

        assert not model.is_gradient_checkpointing and model.training

        out = model(**inputs_dict).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model.zero_grad()

        labels = torch.randn_like(out)
        loss = (out - labels).mean()
        loss.backward()

        # re-instantiate the model now enabling gradient checkpointing
        model_2 = self.model_class(**init_dict)
        # clone model
        model_2.load_state_dict(model.state_dict())
        model_2.to(torch_device)
        model_2.enable_gradient_checkpointing()

        assert model_2.is_gradient_checkpointing and model_2.training

        out_2 = model_2(**inputs_dict).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model_2.zero_grad()
        loss_2 = (out_2 - labels).mean()
        loss_2.backward()

        # compare the output and parameters gradients
        self.assertTrue((loss - loss_2).abs() < 1e-5)
        named_params = dict(model.named_parameters())
        named_params_2 = dict(model_2.named_parameters())
        for name, param in named_params.items():
            self.assertTrue(torch_all_close(param.grad.data, named_params_2[name].grad.data, atol=5e-5))
    def test_from_pretrained_hub(self):
        """simple docstring"""
        model, loading_info = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        image = model(**self.dummy_input)

        assert image is not None, "Make sure output is not None"

    def test_output_pretrained(self):
        """simple docstring"""
        model = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy")
        model = model.to(torch_device)
        model.eval()

        if torch_device == "mps":
            generator = torch.manual_seed(0)
        else:
            generator = torch.Generator(device=torch_device).manual_seed(0)

        image = torch.randn(
            1,
            model.config.in_channels,
            model.config.sample_size,
            model.config.sample_size,
            generator=torch.manual_seed(0),
        )
        image = image.to(torch_device)
        with torch.no_grad():
            output = model(image, sample_posterior=True, generator=generator).sample

        output_slice = output[0, -1, -3:, -3:].flatten().cpu()

        # Since the VAE Gaussian prior's generator is seeded on the appropriate device,
        # the expected output slices are not the same for CPU and GPU.
        if torch_device == "mps":
            expected_output_slice = torch.tensor(
                [
                    -4.0078e-01,
                    -3.8323e-04,
                    -1.2681e-01,
                    -1.1462e-01,
                    2.0095e-01,
                    1.0893e-01,
                    -8.8247e-02,
                    -3.0361e-01,
                    -9.8644e-03,
                ]
            )
        elif torch_device == "cpu":
            expected_output_slice = torch.tensor(
                [-0.1352, 0.0878, 0.0419, -0.0818, -0.1069, 0.0688, -0.1458, -0.4446, -0.0026]
            )
        else:
            expected_output_slice = torch.tensor(
                [-0.2421, 0.4642, 0.2507, -0.0438, 0.0682, 0.3160, -0.2018, -0.0727, 0.2485]
            )

        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))
@slow
class AutoencoderKLIntegrationTests(unittest.TestCase):
    """simple docstring"""

    def get_file_format(self, seed, shape):
        """simple docstring"""
        return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"

    def tearDown(self):
        """simple docstring"""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_sd_image(self, seed=0, shape=(4, 3, 512, 512), fp16=False):
        """simple docstring"""
        dtype = torch.float16 if fp16 else torch.float32
        image = torch.from_numpy(load_hf_numpy(self.get_file_format(seed, shape))).to(torch_device).to(dtype)
        return image

    def get_sd_vae_model(self, model_id="CompVis/stable-diffusion-v1-4", fp16=False):
        """simple docstring"""
        revision = "fp16" if fp16 else None
        torch_dtype = torch.float16 if fp16 else torch.float32

        model = AutoencoderKL.from_pretrained(
            model_id,
            subfolder="vae",
            torch_dtype=torch_dtype,
            revision=revision,
        )
        model.to(torch_device).eval()
        return model

    def get_generator(self, seed=0):
        """simple docstring"""
        if torch_device == "mps":
            return torch.manual_seed(seed)
        return torch.Generator(device=torch_device).manual_seed(seed)
@parameterized.expand(
[
# fmt: off
[33, [-0.1603, 0.9878, -0.0495, -0.0790, -0.2709, 0.8375, -0.2060, -0.0824], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
[47, [-0.2376, 0.1168, 0.1332, -0.4840, -0.2508, -0.0791, -0.0493, -0.4089], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
# fmt: on
] )
    def test_stable_diffusion(self, seed, expected_slice, expected_slice_mps):
        """simple docstring"""
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)
        generator = self.get_generator(seed)

        with torch.no_grad():
            sample = model(image, generator=generator, sample_posterior=True).sample

        assert sample.shape == image.shape

        output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=3e-3)
@parameterized.expand(
[
# fmt: off
[33, [-0.0513, 0.0289, 1.3799, 0.2166, -0.2573, -0.0871, 0.5103, -0.0999]],
[47, [-0.4128, -0.1320, -0.3704, 0.1965, -0.4116, -0.2332, -0.3340, 0.2247]],
# fmt: on
] )
@require_torch_gpu
    def test_stable_diffusion_fp16(self, seed, expected_slice):
        """simple docstring"""
        model = self.get_sd_vae_model(fp16=True)
        image = self.get_sd_image(seed, fp16=True)
        generator = self.get_generator(seed)

        with torch.no_grad():
            sample = model(image, generator=generator, sample_posterior=True).sample

        assert sample.shape == image.shape

        output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=1e-2)
@parameterized.expand(
[
# fmt: off
[33, [-0.1609, 0.9866, -0.0487, -0.0777, -0.2716, 0.8368, -0.2055, -0.0814], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
[47, [-0.2377, 0.1147, 0.1333, -0.4841, -0.2506, -0.0805, -0.0491, -0.4085], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
# fmt: on
] )
    def test_stable_diffusion_mode(self, seed, expected_slice, expected_slice_mps):
        """simple docstring"""
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)

        with torch.no_grad():
            sample = model(image).sample

        assert sample.shape == image.shape

        output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=3e-3)
@parameterized.expand(
[
# fmt: off
[13, [-0.2051, -0.1803, -0.2311, -0.2114, -0.3292, -0.3574, -0.2953, -0.3323]],
[37, [-0.2632, -0.2625, -0.2199, -0.2741, -0.4539, -0.4990, -0.3720, -0.4925]],
# fmt: on
] )
@require_torch_gpu
    def test_stable_diffusion_decode(self, seed, expected_slice):
        """simple docstring"""
        model = self.get_sd_vae_model()
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64))

        with torch.no_grad():
            sample = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        output_slice = sample[-1, -2:, :2, -2:].flatten().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=1e-3)
@parameterized.expand(
[
# fmt: off
[27, [-0.0369, 0.0207, -0.0776, -0.0682, -0.1747, -0.1930, -0.1465, -0.2039]],
[16, [-0.1628, -0.2134, -0.2747, -0.2642, -0.3774, -0.4404, -0.3687, -0.4277]],
# fmt: on
] )
@require_torch_gpu
    def test_stable_diffusion_decode_fp16(self, seed, expected_slice):
        """simple docstring"""
        model = self.get_sd_vae_model(fp16=True)
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64), fp16=True)

        with torch.no_grad():
            sample = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=5e-3)
@parameterized.expand([(13,), (16,), (27,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason='''xformers is not required when using PyTorch 2.0.''' )
    def test_stable_diffusion_decode_xformers_vs_2_0_fp16(self, seed):
        """simple docstring"""
        model = self.get_sd_vae_model(fp16=True)
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64), fp16=True)

        with torch.no_grad():
            sample = model.decode(encoding).sample

        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            sample_2 = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        assert torch_all_close(sample, sample_2, atol=1e-1)
@parameterized.expand([(13,), (16,), (37,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason='''xformers is not required when using PyTorch 2.0.''' )
    def test_stable_diffusion_decode_xformers_vs_2_0(self, seed):
        """simple docstring"""
        model = self.get_sd_vae_model()
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64))

        with torch.no_grad():
            sample = model.decode(encoding).sample

        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            sample_2 = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        assert torch_all_close(sample, sample_2, atol=1e-2)
@parameterized.expand(
[
# fmt: off
[33, [-0.3001, 0.0918, -2.6984, -3.9720, -3.2099, -5.0353, 1.7338, -0.2065, 3.4267]],
[47, [-1.5030, -4.3871, -6.0355, -9.1157, -1.6661, -2.7853, 2.1607, -5.0823, 2.5633]],
# fmt: on
] )
    def test_stable_diffusion_encode_sample(self, seed, expected_slice):
        """simple docstring"""
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)
        generator = self.get_generator(seed)

        with torch.no_grad():
            dist = model.encode(image).latent_dist
            sample = dist.sample(generator=generator)

        assert list(sample.shape) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]

        output_slice = sample[0, -1, -3:, -3:].flatten().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        tolerance = 3e-3 if torch_device != "mps" else 1e-2
        assert torch_all_close(output_slice, expected_output_slice, atol=tolerance)
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
    MBartTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/mbart-large-en-ro""": (
"""https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"""
),
"""facebook/mbart-large-cc25""": (
"""https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"""
),
},
"""tokenizer_file""": {
"""facebook/mbart-large-en-ro""": """https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json""",
"""facebook/mbart-large-cc25""": """https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/mbart-large-en-ro""": 1024,
"""facebook/mbart-large-cc25""": 1024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
class _UpperCAmelCase ( __snake_case ):
'''simple docstring'''
lowerCamelCase__ =VOCAB_FILES_NAMES
lowerCamelCase__ =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase__ =PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase__ =['input_ids', 'attention_mask']
lowerCamelCase__ =MBartTokenizer
lowerCamelCase__ =[]
lowerCamelCase__ =[]
def __init__(self , a_=None , a_=None , a_="<s>" , a_="</s>" , a_="</s>" , a_="<s>" , a_="<unk>" , a_="<pad>" , a_="<mask>" , a_=None , a_=None , a_=None , **a_ , ):
'''simple docstring'''
__snake_case : Optional[int] = AddedToken(a_ , lstrip=a_ , rstrip=a_ ) if isinstance(a_ , a_ ) else mask_token
super().__init__(
vocab_file=a_ , tokenizer_file=a_ , bos_token=a_ , eos_token=a_ , sep_token=a_ , cls_token=a_ , unk_token=a_ , pad_token=a_ , mask_token=a_ , src_lang=a_ , tgt_lang=a_ , additional_special_tokens=a_ , **a_ , )
__snake_case : Tuple = vocab_file
__snake_case : Optional[Any] = False if not self.vocab_file else True
__snake_case : Dict = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({'''additional_special_tokens''': _additional_special_tokens} )
__snake_case : Optional[int] = {
lang_code: self.convert_tokens_to_ids(a_ ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
__snake_case : List[Any] = src_lang if src_lang is not None else '''en_XX'''
__snake_case : Any = self.convert_tokens_to_ids(self._src_lang )
__snake_case : Dict = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return self._src_lang
@src_lang.setter
def SCREAMING_SNAKE_CASE (self , a_ ):
'''simple docstring'''
__snake_case : Tuple = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def SCREAMING_SNAKE_CASE (self , a_ , a_ = None ):
'''simple docstring'''
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def SCREAMING_SNAKE_CASE (self , a_ , a_ = None ):
'''simple docstring'''
__snake_case : Tuple = [self.sep_token_id]
__snake_case : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , **a_ ):
'''simple docstring'''
if src_lang is None or tgt_lang is None:
raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
__snake_case : Optional[int] = src_lang
__snake_case : Tuple = self(a_ , add_special_tokens=a_ , return_tensors=a_ , **a_ )
__snake_case : Union[str, Any] = self.convert_tokens_to_ids(a_ )
__snake_case : int = tgt_lang_id
return inputs
def SCREAMING_SNAKE_CASE (self , a_ , a_ = "en_XX" , a_ = None , a_ = "ro_RO" , **a_ , ):
'''simple docstring'''
__snake_case : int = src_lang
__snake_case : List[Any] = tgt_lang
return super().prepare_seqaseq_batch(a_ , a_ , **a_ )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return self.set_src_lang_special_tokens(self.src_lang )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def SCREAMING_SNAKE_CASE (self , a_ ):
'''simple docstring'''
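# Source-language layout: no prefix tokens, suffix = [eos, language code];
# the fast tokenizer's post-processor template is rebuilt below to match.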
__snake_case : int = self.convert_tokens_to_ids(a_ )
__snake_case : List[Any] = []
__snake_case : Any = [self.eos_token_id, self.cur_lang_code]
__snake_case : List[str] = self.convert_ids_to_tokens(self.prefix_tokens )
__snake_case : Dict = self.convert_ids_to_tokens(self.suffix_tokens )
__snake_case : Any = processors.TemplateProcessing(
single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def SCREAMING_SNAKE_CASE (self , a_ ):
'''simple docstring'''
__snake_case : int = self.convert_tokens_to_ids(a_ )
__snake_case : Optional[Any] = []
__snake_case : Dict = [self.eos_token_id, self.cur_lang_code]
__snake_case : str = self.convert_ids_to_tokens(self.prefix_tokens )
__snake_case : Any = self.convert_ids_to_tokens(self.suffix_tokens )
__snake_case : Tuple = processors.TemplateProcessing(
single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def SCREAMING_SNAKE_CASE (self , a_ , a_ = None ):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(a_ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory.""" )
return
__snake_case : Optional[Any] = os.path.join(
a_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(a_ ):
copyfile(self.vocab_file , a_ )
return (out_vocab_file,)
| 24
| 1
|
"""simple docstring"""
from __future__ import annotations
import numpy as np
def lowercase ( _snake_case : np.ndarray ) ->tuple[np.ndarray, np.ndarray]:
"""simple docstring"""
__snake_case , __snake_case : Optional[Any] = np.shape(_snake_case )
if rows != columns:
__snake_case : Union[str, Any] = (
'''\'table\' has to be a square array, but got a '''
f"""{rows}x{columns} array:\n{table}"""
)
raise ValueError(_snake_case )
__snake_case : Tuple = np.zeros((rows, columns) )
__snake_case : List[str] = np.zeros((rows, columns) )
for i in range(_snake_case ):
for j in range(_snake_case ):
__snake_case : List[Any] = sum(lower[i][k] * upper[k][j] for k in range(_snake_case ) )
if upper[j][j] == 0:
raise ArithmeticError('''No LU decomposition exists''' )
__snake_case : Dict = (table[i][j] - total) / upper[j][j]
__snake_case : Tuple = 1
for j in range(_snake_case , _snake_case ):
__snake_case : List[str] = sum(lower[i][k] * upper[k][j] for k in range(_snake_case ) )
__snake_case : int = table[i][j] - total
return lower, upper
if __name__ == "__main__":
import doctest
doctest.testmod()
| 24
|
"""simple docstring"""
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE : Union[str, Any] = logging.getLogger(__name__)
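# The two frozen dataclasses below mirror the usual HANS containers: a raw
# text example and its tokenized feature set.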
@dataclass(frozen=__snake_case )
class _UpperCAmelCase :
'''simple docstring'''
lowerCamelCase__ =42
lowerCamelCase__ =42
lowerCamelCase__ =None
lowerCamelCase__ =None
lowerCamelCase__ =None
@dataclass(frozen=__snake_case )
class _UpperCAmelCase :
'''simple docstring'''
lowerCamelCase__ =42
lowerCamelCase__ =None
lowerCamelCase__ =None
lowerCamelCase__ =None
lowerCamelCase__ =None
if is_torch_available():
import torch
from torch.utils.data import Dataset
class _UpperCAmelCase ( __snake_case ):
'''simple docstring'''
lowerCamelCase__ =42
def __init__(self , a_ , a_ , a_ , a_ = None , a_=False , a_ = False , ):
'''simple docstring'''
__snake_case : Any = hans_processors[task]()
__snake_case : int = os.path.join(
a_ , '''cached_{}_{}_{}_{}'''.format(
'''dev''' if evaluate else '''train''' , tokenizer.__class__.__name__ , str(a_ ) , a_ , ) , )
__snake_case : Tuple = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
__snake_case , __snake_case : Dict = label_list[2], label_list[1]
__snake_case : Any = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
__snake_case : int = cached_features_file + '''.lock'''
with FileLock(a_ ):
if os.path.exists(a_ ) and not overwrite_cache:
logger.info(f"""Loading features from cached file {cached_features_file}""" )
__snake_case : Union[str, Any] = torch.load(a_ )
else:
logger.info(f"""Creating features from dataset file at {data_dir}""" )
__snake_case : Dict = (
processor.get_dev_examples(a_ ) if evaluate else processor.get_train_examples(a_ )
)
logger.info('''Training examples: %s''' , len(a_ ) )
__snake_case : Optional[int] = hans_convert_examples_to_features(a_ , a_ , a_ , a_ )
logger.info('''Saving features into cached file %s''' , a_ )
torch.save(self.features , a_ )
def __len__(self ):
'''simple docstring'''
return len(self.features )
def __getitem__(self , a_ ):
'''simple docstring'''
return self.features[i]
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return self.label_list
if is_tf_available():
import tensorflow as tf
class _UpperCAmelCase :
'''simple docstring'''
lowerCamelCase__ =42
def __init__(self , a_ , a_ , a_ , a_ = 1_28 , a_=False , a_ = False , ):
'''simple docstring'''
__snake_case : List[Any] = hans_processors[task]()
__snake_case : str = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
__snake_case , __snake_case : Tuple = label_list[2], label_list[1]
__snake_case : Dict = label_list
__snake_case : Optional[Any] = processor.get_dev_examples(a_ ) if evaluate else processor.get_train_examples(a_ )
__snake_case : Dict = hans_convert_examples_to_features(a_ , a_ , a_ , a_ )
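# Wrap the features in a Python generator so tf.data.Dataset.from_generator
# can stream them lazily.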
def gen():
for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc='''convert examples to features''' ):
if ex_index % 10_000 == 0:
logger.info('''Writing example %d of %d''' % (ex_index, len(a_ )) )
yield (
{
"example_id": 0,
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
__snake_case : Union[str, Any] = tf.data.Dataset.from_generator(
a_ , (
{
'''example_id''': tf.intaa,
'''input_ids''': tf.intaa,
'''attention_mask''': tf.intaa,
'''token_type_ids''': tf.intaa,
},
tf.intaa,
) , (
{
'''example_id''': tf.TensorShape([] ),
'''input_ids''': tf.TensorShape([None, None] ),
'''attention_mask''': tf.TensorShape([None, None] ),
'''token_type_ids''': tf.TensorShape([None, None] ),
},
tf.TensorShape([] ),
) , )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return self.dataset
def __len__(self ):
'''simple docstring'''
return len(self.features )
def __getitem__(self , a_ ):
'''simple docstring'''
return self.features[i]
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return self.label_list
class _UpperCAmelCase ( __snake_case ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE (self , a_ ):
'''simple docstring'''
return self._create_examples(self._read_tsv(os.path.join(a_ , '''heuristics_train_set.txt''' ) ) , '''train''' )
def SCREAMING_SNAKE_CASE (self , a_ ):
'''simple docstring'''
return self._create_examples(self._read_tsv(os.path.join(a_ , '''heuristics_evaluation_set.txt''' ) ) , '''dev''' )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return ["contradiction", "entailment", "neutral"]
def SCREAMING_SNAKE_CASE (self , a_ , a_ ):
'''simple docstring'''
__snake_case : List[Any] = []
for i, line in enumerate(a_ ):
if i == 0:
continue
__snake_case : Tuple = '''%s-%s''' % (set_type, line[0])
__snake_case : Dict = line[5]
__snake_case : int = line[6]
__snake_case : Dict = line[7][2:] if line[7].startswith('''ex''' ) else line[7]
__snake_case : List[Any] = line[0]
examples.append(InputExample(guid=a_ , text_a=a_ , text_b=a_ , label=a_ , pairID=a_ ) )
return examples
def lowercase ( _snake_case : List[InputExample] , _snake_case : List[str] , _snake_case : int , _snake_case : PreTrainedTokenizer , ) ->List[str]:
"""simple docstring"""
__snake_case : Optional[int] = {label: i for i, label in enumerate(_snake_case )}
__snake_case : Tuple = []
for ex_index, example in tqdm.tqdm(enumerate(_snake_case ) , desc='''convert examples to features''' ):
if ex_index % 10_000 == 0:
logger.info('''Writing example %d''' % (ex_index) )
__snake_case : List[Any] = tokenizer(
example.text_a , example.text_b , add_special_tokens=_snake_case , max_length=_snake_case , padding='''max_length''' , truncation=_snake_case , return_overflowing_tokens=_snake_case , )
__snake_case : List[Any] = label_map[example.label] if example.label in label_map else 0
__snake_case : Union[str, Any] = int(example.pairID )
features.append(InputFeatures(**_snake_case , label=_snake_case , pairID=_snake_case ) )
for i, example in enumerate(examples[:5] ):
logger.info('''*** Example ***''' )
logger.info(f"""guid: {example}""" )
logger.info(f"""features: {features[i]}""" )
return features
SCREAMING_SNAKE_CASE : Dict = {
"""hans""": 3,
}
SCREAMING_SNAKE_CASE : str = {
"""hans""": HansProcessor,
}
| 24
| 1
|
"""simple docstring"""
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
SCREAMING_SNAKE_CASE : int = datasets.utils.logging.get_logger(__name__)
@dataclass
class _UpperCAmelCase ( datasets.BuilderConfig ):
'''simple docstring'''
lowerCamelCase__ =10000
lowerCamelCase__ =None
lowerCamelCase__ =None
class _UpperCAmelCase ( datasets.ArrowBasedBuilder ):
'''simple docstring'''
lowerCamelCase__ =ParquetConfig
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return datasets.DatasetInfo(features=self.config.features )
def SCREAMING_SNAKE_CASE (self , a_ ):
'''simple docstring'''
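# Resolve `data_files` into one SplitGenerator per split; when no features are
# configured, infer them from the Arrow schema of the first parquet file.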
if not self.config.data_files:
raise ValueError(f"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
__snake_case : int = dl_manager.download_and_extract(self.config.data_files )
if isinstance(a_ , (str, list, tuple) ):
__snake_case : Union[str, Any] = data_files
if isinstance(a_ , a_ ):
__snake_case : Union[str, Any] = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
__snake_case : List[Any] = [dl_manager.iter_files(a_ ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files} )]
__snake_case : int = []
for split_name, files in data_files.items():
if isinstance(a_ , a_ ):
__snake_case : List[Any] = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
__snake_case : int = [dl_manager.iter_files(a_ ) for file in files]
# Infer features if they are stored in the arrow schema
if self.info.features is None:
for file in itertools.chain.from_iterable(a_ ):
with open(a_ , '''rb''' ) as f:
__snake_case : Any = datasets.Features.from_arrow_schema(pq.read_schema(a_ ) )
break
splits.append(datasets.SplitGenerator(name=a_ , gen_kwargs={'''files''': files} ) )
return splits
def SCREAMING_SNAKE_CASE (self , a_ ):
'''simple docstring'''
if self.info.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
__snake_case : List[Any] = table_cast(a_ , self.info.features.arrow_schema )
return pa_table
def SCREAMING_SNAKE_CASE (self , a_ ):
'''simple docstring'''
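# Stream record batches from each parquet file and yield them as Arrow tables
# keyed by file and batch index, cast to the configured features.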
__snake_case : List[Any] = self.info.features.arrow_schema if self.info.features is not None else None
if self.info.features is not None and self.config.columns is not None:
if sorted(field.name for field in schema ) != sorted(self.config.columns ):
raise ValueError(
f"""Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'""" )
for file_idx, file in enumerate(itertools.chain.from_iterable(a_ ) ):
with open(a_ , '''rb''' ) as f:
__snake_case : int = pq.ParquetFile(a_ )
try:
for batch_idx, record_batch in enumerate(
parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns ) ):
__snake_case : Dict = pa.Table.from_batches([record_batch] )
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield f"""{file_idx}_{batch_idx}""", self._cast_table(a_ )
except ValueError as e:
logger.error(f"""Failed to read file '{file}' with error {type(a_ )}: {e}""" )
raise
| 24
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE : Optional[Any] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : List[str] = {
"""tanreinama/GPTSAN-2.8B-spout_is_uniform""": (
"""https://huggingface.co/tanreinama/GPTSAN-2.8B-spout_is_uniform/resolve/main/config.json"""
),
}
class _UpperCAmelCase ( __snake_case ):
'''simple docstring'''
lowerCamelCase__ ='gptsan-japanese'
lowerCamelCase__ =[
'past_key_values',
]
lowerCamelCase__ ={
'hidden_size': 'd_model',
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
def __init__(self , a_=3_60_00 , a_=12_80 , a_=10_24 , a_=81_92 , a_=40_96 , a_=1_28 , a_=10 , a_=0 , a_=16 , a_=16 , a_=1_28 , a_=0.0 , a_=1E-5 , a_=False , a_=0.0 , a_="float32" , a_=False , a_=False , a_=False , a_=0.002 , a_=False , a_=True , a_=3_59_98 , a_=3_59_95 , a_=3_59_99 , **a_ , ):
'''simple docstring'''
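# Plain attribute storage for the GPTSAN-japanese hyperparameters; the total
# number of layers is the sum of the switch and extra (ext) layers.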
__snake_case : Any = vocab_size
__snake_case : str = max_position_embeddings
__snake_case : Any = d_model
__snake_case : List[str] = d_ff
__snake_case : Dict = d_ext
__snake_case : Optional[Any] = d_spout
__snake_case : int = num_switch_layers
__snake_case : List[Any] = num_ext_layers
__snake_case : Any = num_switch_layers + num_ext_layers
__snake_case : Optional[int] = num_heads
__snake_case : Tuple = num_experts
__snake_case : List[Any] = expert_capacity
__snake_case : Dict = dropout_rate
__snake_case : Optional[Any] = layer_norm_epsilon
__snake_case : Dict = router_bias
__snake_case : str = router_jitter_noise
__snake_case : List[str] = router_dtype
__snake_case : Union[str, Any] = router_ignore_padding_tokens
__snake_case : List[str] = output_hidden_states
__snake_case : Optional[Any] = output_attentions
__snake_case : Any = initializer_factor
__snake_case : int = output_router_logits
__snake_case : Union[str, Any] = use_cache
super().__init__(
separator_token_id=a_ , pad_token_id=a_ , eos_token_id=a_ , **a_ , )
| 24
| 1
|
"""simple docstring"""
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
SCREAMING_SNAKE_CASE : Union[str, Any] = NewType("""DataClass""", Any)
SCREAMING_SNAKE_CASE : str = NewType("""DataClassType""", Any)
def lowercase ( _snake_case : Optional[int] ) ->List[str]:
"""simple docstring"""
if isinstance(_snake_case , _snake_case ):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise ArgumentTypeError(
f"""Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive).""" )
def lowercase ( _snake_case : list ) ->Callable[[str], Any]:
"""simple docstring"""
__snake_case : int = {str(_snake_case ): choice for choice in choices}
return lambda _snake_case : str_to_choice.get(_snake_case , _snake_case )
def lowercase ( *,
_snake_case : Union[str, List[str]] = None , _snake_case : str = None , _snake_case : Any = dataclasses.MISSING , _snake_case : Callable[[], Any] = dataclasses.MISSING , _snake_case : dict = None , **_snake_case : str , ) ->dataclasses.Field:
"""simple docstring"""
if metadata is None:
# Important, don't use as default param in function signature because dict is mutable and shared across function calls
__snake_case : str = {}
if aliases is not None:
__snake_case : Any = aliases
if help is not None:
__snake_case : Dict = help
return dataclasses.field(metadata=_snake_case , default=_snake_case , default_factory=_snake_case , **_snake_case )
class _UpperCAmelCase ( __snake_case ):
'''simple docstring'''
lowerCamelCase__ =42
def __init__(self , a_ , **a_ ):
'''simple docstring'''
if "formatter_class" not in kwargs:
__snake_case : Dict = ArgumentDefaultsHelpFormatter
super().__init__(**a_ )
if dataclasses.is_dataclass(a_ ):
__snake_case : Optional[int] = [dataclass_types]
__snake_case : Any = list(a_ )
for dtype in self.dataclass_types:
self._add_dataclass_arguments(a_ )
@staticmethod
def SCREAMING_SNAKE_CASE (a_ , a_ ):
'''simple docstring'''
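# Translate a single dataclass field into an argparse argument, with special
# handling for Optional/Union, Literal and Enum choices, bool flags (plus a
# generated `--no_*` complement) and list-valued fields.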
__snake_case : Optional[int] = f"""--{field.name}"""
__snake_case : Optional[Any] = field.metadata.copy()
# field.metadata is not used at all by Data Classes,
# it is provided as a third-party extension mechanism.
if isinstance(field.type , a_ ):
raise RuntimeError(
'''Unresolved type detected; types should have been resolved with the help of '''
'''the `typing.get_type_hints` method by default''' )
__snake_case : Optional[Any] = kwargs.pop('''aliases''' , [] )
if isinstance(a_ , a_ ):
__snake_case : Tuple = [aliases]
__snake_case : Union[str, Any] = getattr(field.type , '''__origin__''' , field.type )
if origin_type is Union or (hasattr(a_ , '''UnionType''' ) and isinstance(a_ , types.UnionType )):
if str not in field.type.__args__ and (
len(field.type.__args__ ) != 2 or type(a_ ) not in field.type.__args__
):
raise ValueError(
'''Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because'''
''' the argument parser only supports one type per argument.'''
f""" Problem encountered in field '{field.name}'.""" )
if type(a_ ) not in field.type.__args__:
# filter `str` in Union
__snake_case : Optional[int] = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
__snake_case : List[Any] = getattr(field.type , '''__origin__''' , field.type )
elif bool not in field.type.__args__:
# filter `NoneType` in Union (except for `Union[bool, NoneType]`)
__snake_case : List[str] = (
field.type.__args__[0] if isinstance(a_ , field.type.__args__[1] ) else field.type.__args__[1]
)
__snake_case : Union[str, Any] = getattr(field.type , '''__origin__''' , field.type )
# A variable to store kwargs for a boolean field, if needed
# so that we can init a `no_*` complement argument (see below)
__snake_case : Optional[int] = {}
if origin_type is Literal or (isinstance(field.type , a_ ) and issubclass(field.type , a_ )):
if origin_type is Literal:
__snake_case : int = field.type.__args__
else:
__snake_case : Tuple = [x.value for x in field.type]
__snake_case : Tuple = make_choice_type_function(kwargs['''choices'''] )
if field.default is not dataclasses.MISSING:
__snake_case : List[Any] = field.default
else:
__snake_case : Union[str, Any] = True
elif field.type is bool or field.type == Optional[bool]:
# Copy the current kwargs to use to instantiate a `no_*` complement argument below.
# We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
__snake_case : List[str] = copy(a_ )
# Hack because type=bool in argparse does not behave as we want.
__snake_case : Tuple = string_to_bool
if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
# Default value is False when the field is of type bool and has no explicit default.
__snake_case : str = False if field.default is dataclasses.MISSING else field.default
# This is the value that will get picked if we don't include --field_name in any way
__snake_case : Optional[int] = default
# This tells argparse we accept 0 or 1 value after --field_name
__snake_case : Optional[Any] = '''?'''
# This is the value that will get picked if we do --field_name (without value)
__snake_case : Any = True
elif isclass(a_ ) and issubclass(a_ , a_ ):
__snake_case : List[Any] = field.type.__args__[0]
__snake_case : List[Any] = '''+'''
if field.default_factory is not dataclasses.MISSING:
__snake_case : Any = field.default_factory()
elif field.default is dataclasses.MISSING:
__snake_case : List[Any] = True
else:
__snake_case : Any = field.type
if field.default is not dataclasses.MISSING:
__snake_case : Optional[int] = field.default
elif field.default_factory is not dataclasses.MISSING:
__snake_case : Tuple = field.default_factory()
else:
__snake_case : Tuple = True
parser.add_argument(a_ , *a_ , **a_ )
# Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
# Order is important for arguments with the same destination!
# We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
# here and we do not need those changes/additional keys.
if field.default is True and (field.type is bool or field.type == Optional[bool]):
__snake_case : List[str] = False
parser.add_argument(f"""--no_{field.name}""" , action='''store_false''' , dest=field.name , **a_ )
def SCREAMING_SNAKE_CASE (self , a_ ):
'''simple docstring'''
if hasattr(a_ , '''_argument_group_name''' ):
__snake_case : Optional[Any] = self.add_argument_group(dtype._argument_group_name )
else:
__snake_case : List[str] = self
try:
__snake_case : Dict[str, type] = get_type_hints(a_ )
except NameError:
raise RuntimeError(
f"""Type resolution failed for {dtype}. Try declaring the class in global scope or """
'''removing the line `from __future__ import annotations`, which opts into Postponed '''
'''Evaluation of Annotations (PEP 563)''' )
except TypeError as ex:
# Remove this block when we drop Python 3.9 support
if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(a_ ):
__snake_case : int = '''.'''.join(map(a_ , sys.version_info[:3] ) )
raise RuntimeError(
f"""Type resolution failed for {dtype} on Python {python_version}. Try removing """
'''the line `from __future__ import annotations`, which opts into union types as '''
'''`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To '''
'''support Python versions lower than 3.10, you need to use '''
'''`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of '''
'''`X | None`.''' ) from ex
raise
for field in dataclasses.fields(a_ ):
if not field.init:
continue
__snake_case : List[Any] = type_hints[field.name]
self._parse_dataclass_field(a_ , a_ )
def SCREAMING_SNAKE_CASE (self , a_=None , a_=False , a_=True , a_=None , a_=None , ):
'''simple docstring'''
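# Optionally merge arguments read from `.args` files (command-line arguments
# take precedence), then carve the parsed namespace up into one instance per
# registered dataclass type.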
if args_file_flag or args_filename or (look_for_args_file and len(sys.argv )):
__snake_case : Union[str, Any] = []
if args_filename:
args_files.append(Path(a_ ) )
elif look_for_args_file and len(sys.argv ):
args_files.append(Path(sys.argv[0] ).with_suffix('''.args''' ) )
# args files specified via command line flag should overwrite default args files so we add them last
if args_file_flag:
# Create special parser just to extract the args_file_flag values
__snake_case : Union[str, Any] = ArgumentParser()
args_file_parser.add_argument(a_ , type=a_ , action='''append''' )
# Use only remaining args for further parsing (remove the args_file_flag)
__snake_case , __snake_case : Optional[Any] = args_file_parser.parse_known_args(args=a_ )
__snake_case : Union[str, Any] = vars(a_ ).get(args_file_flag.lstrip('''-''' ) , a_ )
if cmd_args_file_paths:
args_files.extend([Path(a_ ) for p in cmd_args_file_paths] )
__snake_case : Dict = []
for args_file in args_files:
if args_file.exists():
file_args += args_file.read_text().split()
# in case of duplicate arguments the last one has precedence
# args specified via the command line should overwrite args from files, so we add them last
__snake_case : List[str] = file_args + args if args is not None else file_args + sys.argv[1:]
__snake_case , __snake_case : List[str] = self.parse_known_args(args=a_ )
__snake_case : str = []
for dtype in self.dataclass_types:
__snake_case : Any = {f.name for f in dataclasses.fields(a_ ) if f.init}
__snake_case : Optional[Any] = {k: v for k, v in vars(a_ ).items() if k in keys}
for k in keys:
delattr(a_ , a_ )
__snake_case : str = dtype(**a_ )
outputs.append(a_ )
if len(namespace.__dict__ ) > 0:
# additional namespace.
outputs.append(a_ )
if return_remaining_strings:
return (*outputs, remaining_args)
else:
if remaining_args:
raise ValueError(f"""Some specified arguments are not used by the HfArgumentParser: {remaining_args}""" )
return (*outputs,)
def SCREAMING_SNAKE_CASE (self , a_ , a_ = False ):
'''simple docstring'''
__snake_case : Tuple = set(args.keys() )
__snake_case : Optional[int] = []
for dtype in self.dataclass_types:
__snake_case : List[str] = {f.name for f in dataclasses.fields(a_ ) if f.init}
__snake_case : List[Any] = {k: v for k, v in args.items() if k in keys}
unused_keys.difference_update(inputs.keys() )
__snake_case : List[Any] = dtype(**a_ )
outputs.append(a_ )
if not allow_extra_keys and unused_keys:
raise ValueError(f"""Some keys are not used by the HfArgumentParser: {sorted(a_ )}""" )
return tuple(a_ )
def SCREAMING_SNAKE_CASE (self , a_ , a_ = False ):
'''simple docstring'''
with open(Path(a_ ) , encoding='''utf-8''' ) as open_json_file:
__snake_case : List[Any] = json.loads(open_json_file.read() )
__snake_case : Optional[Any] = self.parse_dict(a_ , allow_extra_keys=a_ )
return tuple(a_ )
def SCREAMING_SNAKE_CASE (self , a_ , a_ = False ):
'''simple docstring'''
__snake_case : List[str] = self.parse_dict(yaml.safe_load(Path(a_ ).read_text() ) , allow_extra_keys=a_ )
return tuple(a_ )
| 24
|
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE : Dict = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : str = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""adapter_layer""": """encoder.layers.*.adapter_layer""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
"""pooling_layer.linear""": """projector""",
"""pooling_layer.projection""": """classifier""",
}
SCREAMING_SNAKE_CASE : int = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
"""projector""",
"""classifier""",
]
def lowercase ( _snake_case : Optional[int] ) ->int:
"""simple docstring"""
__snake_case : int = {}
with open(_snake_case , '''r''' ) as file:
for line_number, line in enumerate(_snake_case ):
__snake_case : Union[str, Any] = line.strip()
if line:
__snake_case : str = line.split()
__snake_case : Union[str, Any] = line_number
__snake_case : Dict = words[0]
__snake_case : str = value
return result
def lowercase ( _snake_case : Optional[Any] , _snake_case : List[str] , _snake_case : Tuple , _snake_case : Any , _snake_case : List[str] ) ->List[str]:
"""simple docstring"""
for attribute in key.split('''.''' ):
__snake_case : Dict = getattr(_snake_case , _snake_case )
__snake_case : Any = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(_snake_case ):
__snake_case : int = PARAM_MAPPING[full_name.split('''.''' )[-1]]
__snake_case : str = '''param'''
if weight_type is not None and weight_type != "param":
__snake_case : Union[str, Any] = getattr(_snake_case , _snake_case ).shape
elif weight_type is not None and weight_type == "param":
__snake_case : Optional[Any] = hf_pointer
for attribute in hf_param_name.split('''.''' ):
__snake_case : Dict = getattr(_snake_case , _snake_case )
__snake_case : List[str] = shape_pointer.shape
# strip the extra leading dimension
__snake_case : int = value[0]
else:
__snake_case : int = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
f""" {value.shape} for {full_name}""" )
if weight_type == "weight":
__snake_case : List[Any] = value
elif weight_type == "weight_g":
__snake_case : Tuple = value
elif weight_type == "weight_v":
__snake_case : str = value
elif weight_type == "bias":
__snake_case : str = value
elif weight_type == "param":
for attribute in hf_param_name.split('''.''' ):
__snake_case : List[Any] = getattr(_snake_case , _snake_case )
__snake_case : int = value
else:
__snake_case : List[Any] = value
logger.info(f"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )
def lowercase ( _snake_case : Any , _snake_case : List[Any] , _snake_case : Dict , _snake_case : List[str] , _snake_case : int ) ->int:
"""simple docstring"""
__snake_case : Optional[Any] = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(_snake_case ):
__snake_case : Dict = PARAM_MAPPING[full_name.split('''.''' )[-1]]
__snake_case : List[str] = '''param'''
if weight_type is not None and weight_type != "param":
__snake_case : str = '''.'''.join([key, weight_type] )
elif weight_type is not None and weight_type == "param":
__snake_case : Tuple = '''.'''.join([key, hf_param_name] )
else:
__snake_case : Optional[int] = key
__snake_case : List[Any] = value if '''lm_head''' in full_key else value[0]
SCREAMING_SNAKE_CASE : Tuple = {
"""W_a""": """linear_1.weight""",
"""W_b""": """linear_2.weight""",
"""b_a""": """linear_1.bias""",
"""b_b""": """linear_2.bias""",
"""ln_W""": """norm.weight""",
"""ln_b""": """norm.bias""",
}
def lowercase ( _snake_case : str , _snake_case : List[Any] , _snake_case : Tuple=None , _snake_case : int=None ) ->Dict:
"""simple docstring"""
__snake_case : Tuple = False
for key, mapped_key in MAPPING.items():
__snake_case : int = '''wav2vec2.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
__snake_case : int = True
if "*" in mapped_key:
__snake_case : List[Any] = name.split(_snake_case )[0].split('''.''' )[-2]
__snake_case : Tuple = mapped_key.replace('''*''' , _snake_case )
if "weight_g" in name:
__snake_case : Union[str, Any] = '''weight_g'''
elif "weight_v" in name:
__snake_case : List[str] = '''weight_v'''
elif "bias" in name:
__snake_case : Any = '''bias'''
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
__snake_case : List[Any] = '''weight'''
else:
__snake_case : Union[str, Any] = None
if hf_dict is not None:
rename_dict(_snake_case , _snake_case , _snake_case , _snake_case , _snake_case )
else:
set_recursively(_snake_case , _snake_case , _snake_case , _snake_case , _snake_case )
return is_used
return is_used
def lowercase ( _snake_case : str , _snake_case : Dict , _snake_case : List[str] ) ->Any:
"""simple docstring"""
__snake_case : Union[str, Any] = []
__snake_case : Union[str, Any] = fairseq_model.state_dict()
__snake_case : str = hf_model.wavaveca.feature_extractor
for name, value in fairseq_dict.items():
__snake_case : str = False
if "conv_layers" in name:
load_conv_layer(
_snake_case , _snake_case , _snake_case , _snake_case , hf_model.config.feat_extract_norm == '''group''' , )
__snake_case : Union[str, Any] = True
else:
__snake_case : Optional[Any] = load_wavaveca_layer(_snake_case , _snake_case , _snake_case )
if not is_used:
unused_weights.append(_snake_case )
logger.warning(f"""Unused weights: {unused_weights}""" )
def lowercase ( _snake_case : Any , _snake_case : str , _snake_case : Any , _snake_case : Tuple , _snake_case : List[str] ) ->Optional[int]:
"""simple docstring"""
__snake_case : Union[str, Any] = full_name.split('''conv_layers.''' )[-1]
__snake_case : str = name.split('''.''' )
__snake_case : Optional[int] = int(items[0] )
__snake_case : Any = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
__snake_case : int = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
__snake_case : Any = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" )
__snake_case : Any = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" )
__snake_case : List[str] = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(_snake_case )
@torch.no_grad()
def lowercase ( _snake_case : int , _snake_case : Union[str, Any] , _snake_case : Any=None , _snake_case : str=None , _snake_case : List[Any]=True , _snake_case : int=False ) ->Dict:
"""simple docstring"""
if config_path is not None:
__snake_case : Optional[Any] = WavaVecaConfig.from_pretrained(_snake_case )
else:
__snake_case : Tuple = WavaVecaConfig()
if is_seq_class:
__snake_case : Optional[int] = read_txt_into_dict(_snake_case )
__snake_case : List[Any] = idalabel
__snake_case : int = WavaVecaForSequenceClassification(_snake_case )
__snake_case : int = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=_snake_case , return_attention_mask=_snake_case , )
feature_extractor.save_pretrained(_snake_case )
elif is_finetuned:
if dict_path:
__snake_case : int = Dictionary.load(_snake_case )
# important: change the bos & pad token ids, since the CTC blank symbol is <pad>
# and not <s> as in fairseq
__snake_case : Tuple = target_dict.pad_index
__snake_case : int = target_dict.bos_index
__snake_case : Tuple = target_dict.eos_index
__snake_case : Optional[Any] = len(target_dict.symbols )
__snake_case : Any = os.path.join(_snake_case , '''vocab.json''' )
if not os.path.isdir(_snake_case ):
logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(_snake_case ) )
return
os.makedirs(_snake_case , exist_ok=_snake_case )
__snake_case : Optional[Any] = target_dict.indices
# fairseq has the <pad> and <s> switched
__snake_case : Dict = 0
__snake_case : List[Any] = 1
with open(_snake_case , '''w''' , encoding='''utf-8''' ) as vocab_handle:
json.dump(_snake_case , _snake_case )
__snake_case : List[Any] = WavaVecaCTCTokenizer(
_snake_case , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=_snake_case , )
__snake_case : Tuple = True if config.feat_extract_norm == '''layer''' else False
__snake_case : str = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=_snake_case , return_attention_mask=_snake_case , )
__snake_case : Tuple = WavaVecaProcessor(feature_extractor=_snake_case , tokenizer=_snake_case )
processor.save_pretrained(_snake_case )
__snake_case : Optional[int] = WavaVecaForCTC(_snake_case )
else:
__snake_case : Tuple = WavaVecaForPreTraining(_snake_case )
if is_finetuned or is_seq_class:
__snake_case , __snake_case , __snake_case : List[Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
else:
__snake_case : Dict = argparse.Namespace(task='''audio_pretraining''' )
__snake_case : Optional[int] = fairseq.tasks.setup_task(_snake_case )
__snake_case , __snake_case , __snake_case : List[str] = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=_snake_case )
__snake_case : int = model[0].eval()
recursively_load_weights(_snake_case , _snake_case , not is_finetuned )
hf_wavavec.save_pretrained(_snake_case )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE : Optional[Any] = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
parser.add_argument(
"""--is_seq_class""",
action="""store_true""",
help="""Whether the model to convert is a fine-tuned sequence classification model or not""",
)
SCREAMING_SNAKE_CASE : Any = parser.parse_args()
SCREAMING_SNAKE_CASE : Tuple = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
| 24
| 1
|
"""simple docstring"""
from __future__ import annotations
import math
def lowercase ( _snake_case : int , _snake_case : int , _snake_case : bool , _snake_case : list[int] , _snake_case : float ) ->int:
"""simple docstring"""
if depth < 0:
raise ValueError('''Depth cannot be less than 0''' )
if not scores:
raise ValueError('''Scores cannot be empty''' )
if depth == height:
return scores[node_index]
return (
max(
minimax(depth + 1 , node_index * 2 , _snake_case , _snake_case , _snake_case ) , minimax(depth + 1 , node_index * 2 + 1 , _snake_case , _snake_case , _snake_case ) , )
if is_max
else min(
minimax(depth + 1 , node_index * 2 , _snake_case , _snake_case , _snake_case ) , minimax(depth + 1 , node_index * 2 + 1 , _snake_case , _snake_case , _snake_case ) , )
)
def lowercase ( ) ->None:
"""simple docstring"""
__snake_case : int = [90, 23, 6, 33, 21, 65, 123, 34_423]
__snake_case : int = math.log(len(_snake_case ) , 2 )
print(f"""Optimal value : {minimax(0 , 0 , _snake_case , _snake_case , _snake_case )}""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 24
|
"""simple docstring"""
from ..utils import DummyObject, requires_backends
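# Dummy placeholder: any instantiation or classmethod call raises an import
# error pointing at the missing transformers / torch / note_seq backends.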
class _UpperCAmelCase ( metaclass=__snake_case ):
'''simple docstring'''
lowerCamelCase__ =['transformers', 'torch', 'note_seq']
def __init__(self , *a_ , **a_ ):
'''simple docstring'''
requires_backends(self , ['''transformers''', '''torch''', '''note_seq'''] )
@classmethod
def SCREAMING_SNAKE_CASE (cls , *a_ , **a_ ):
'''simple docstring'''
requires_backends(cls , ['''transformers''', '''torch''', '''note_seq'''] )
@classmethod
def SCREAMING_SNAKE_CASE (cls , *a_ , **a_ ):
'''simple docstring'''
requires_backends(cls , ['''transformers''', '''torch''', '''note_seq'''] )
| 24
| 1
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
SCREAMING_SNAKE_CASE : Optional[int] = None
SCREAMING_SNAKE_CASE : Any = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : int = {"""vocab_file""": """sentencepiece.bpe.model""", """tokenizer_file""": """tokenizer.json"""}
SCREAMING_SNAKE_CASE : List[Any] = {
"""vocab_file""": {
"""facebook/mbart-large-en-ro""": (
"""https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"""
),
"""facebook/mbart-large-cc25""": (
"""https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"""
),
},
"""tokenizer_file""": {
"""facebook/mbart-large-en-ro""": """https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json""",
"""facebook/mbart-large-cc25""": """https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json""",
},
}
SCREAMING_SNAKE_CASE : Tuple = {
"""facebook/mbart-large-en-ro""": 1024,
"""facebook/mbart-large-cc25""": 1024,
}
# fmt: off
SCREAMING_SNAKE_CASE : List[Any] = ["""ar_AR""", """cs_CZ""", """de_DE""", """en_XX""", """es_XX""", """et_EE""", """fi_FI""", """fr_XX""", """gu_IN""", """hi_IN""", """it_IT""", """ja_XX""", """kk_KZ""", """ko_KR""", """lt_LT""", """lv_LV""", """my_MM""", """ne_NP""", """nl_XX""", """ro_RO""", """ru_RU""", """si_LK""", """tr_TR""", """vi_VN""", """zh_CN"""]
class _UpperCAmelCase ( __snake_case ):
'''simple docstring'''
lowerCamelCase__ =VOCAB_FILES_NAMES
lowerCamelCase__ =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase__ =PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase__ =['input_ids', 'attention_mask']
lowerCamelCase__ =MBartTokenizer
lowerCamelCase__ =[]
lowerCamelCase__ =[]
def __init__(self , a_=None , a_=None , a_="<s>" , a_="</s>" , a_="</s>" , a_="<s>" , a_="<unk>" , a_="<pad>" , a_="<mask>" , a_=None , a_=None , a_=None , **a_ , ):
'''simple docstring'''
__snake_case : Optional[int] = AddedToken(a_ , lstrip=a_ , rstrip=a_ ) if isinstance(a_ , a_ ) else mask_token
super().__init__(
vocab_file=a_ , tokenizer_file=a_ , bos_token=a_ , eos_token=a_ , sep_token=a_ , cls_token=a_ , unk_token=a_ , pad_token=a_ , mask_token=a_ , src_lang=a_ , tgt_lang=a_ , additional_special_tokens=a_ , **a_ , )
__snake_case : Tuple = vocab_file
__snake_case : Optional[Any] = False if not self.vocab_file else True
__snake_case : Dict = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({'''additional_special_tokens''': _additional_special_tokens} )
__snake_case : Optional[int] = {
lang_code: self.convert_tokens_to_ids(a_ ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
__snake_case : List[Any] = src_lang if src_lang is not None else '''en_XX'''
__snake_case : Any = self.convert_tokens_to_ids(self._src_lang )
__snake_case : Dict = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return self._src_lang
@src_lang.setter
def SCREAMING_SNAKE_CASE (self , a_ ):
'''simple docstring'''
__snake_case : Tuple = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def SCREAMING_SNAKE_CASE (self , a_ , a_ = None ):
'''simple docstring'''
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def SCREAMING_SNAKE_CASE (self , a_ , a_ = None ):
'''simple docstring'''
__snake_case : Tuple = [self.sep_token_id]
__snake_case : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , **a_ ):
'''simple docstring'''
if src_lang is None or tgt_lang is None:
raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
__snake_case : Optional[int] = src_lang
__snake_case : Tuple = self(a_ , add_special_tokens=a_ , return_tensors=a_ , **a_ )
__snake_case : Union[str, Any] = self.convert_tokens_to_ids(a_ )
__snake_case : int = tgt_lang_id
return inputs
def SCREAMING_SNAKE_CASE (self , a_ , a_ = "en_XX" , a_ = None , a_ = "ro_RO" , **a_ , ):
'''simple docstring'''
__snake_case : int = src_lang
__snake_case : List[Any] = tgt_lang
return super().prepare_seqaseq_batch(a_ , a_ , **a_ )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return self.set_src_lang_special_tokens(self.src_lang )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def SCREAMING_SNAKE_CASE (self , a_ ):
'''simple docstring'''
__snake_case : int = self.convert_tokens_to_ids(a_ )
__snake_case : List[Any] = []
__snake_case : Any = [self.eos_token_id, self.cur_lang_code]
__snake_case : List[str] = self.convert_ids_to_tokens(self.prefix_tokens )
__snake_case : Dict = self.convert_ids_to_tokens(self.suffix_tokens )
__snake_case : Any = processors.TemplateProcessing(
single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def SCREAMING_SNAKE_CASE (self , a_ ):
'''simple docstring'''
__snake_case : int = self.convert_tokens_to_ids(a_ )
__snake_case : Optional[Any] = []
__snake_case : Dict = [self.eos_token_id, self.cur_lang_code]
__snake_case : str = self.convert_ids_to_tokens(self.prefix_tokens )
__snake_case : Any = self.convert_ids_to_tokens(self.suffix_tokens )
__snake_case : Tuple = processors.TemplateProcessing(
single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def SCREAMING_SNAKE_CASE (self , a_ , a_ = None ):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(a_ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory.""" )
return
__snake_case : Optional[Any] = os.path.join(
a_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(a_ ):
copyfile(self.vocab_file , a_ )
return (out_vocab_file,)
| 24
|
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __init__(self , a_ , a_=7 , a_=3 , a_=18 , a_=30 , a_=4_00 , a_=True , a_=None , a_=True , a_=None , a_=True , ):
'''simple docstring'''
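# Lightweight holder for the image-processing test configuration; the dict
# returned below mirrors the kwargs expected by MobileViTImageProcessor.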
__snake_case : List[Any] = size if size is not None else {'''shortest_edge''': 20}
__snake_case : int = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
__snake_case : Tuple = parent
__snake_case : Tuple = batch_size
__snake_case : Tuple = num_channels
__snake_case : List[str] = image_size
__snake_case : Optional[Any] = min_resolution
__snake_case : List[Any] = max_resolution
__snake_case : List[Any] = do_resize
__snake_case : Dict = size
__snake_case : Dict = do_center_crop
__snake_case : Dict = crop_size
__snake_case : str = do_flip_channel_order
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_flip_channel_order": self.do_flip_channel_order,
}
@require_torch
@require_vision
class _UpperCAmelCase ( __snake_case, unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ =MobileViTImageProcessor if is_vision_available() else None
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Tuple = MobileViTImageProcessingTester(self )
@property
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : int = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(a_ , '''do_resize''' ) )
self.assertTrue(hasattr(a_ , '''size''' ) )
self.assertTrue(hasattr(a_ , '''do_center_crop''' ) )
self.assertTrue(hasattr(a_ , '''center_crop''' ) )
self.assertTrue(hasattr(a_ , '''do_flip_channel_order''' ) )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : int = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 20} )
self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} )
__snake_case : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {'''shortest_edge''': 42} )
self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__snake_case : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ )
for image in image_inputs:
self.assertIsInstance(a_ , Image.Image )
# Test not batched input
__snake_case : Optional[int] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
__snake_case : str = image_processing(a_ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Dict = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__snake_case : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ , numpify=a_ )
for image in image_inputs:
self.assertIsInstance(a_ , np.ndarray )
# Test not batched input
__snake_case : Union[str, Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
__snake_case : Union[str, Any] = image_processing(a_ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Any = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__snake_case : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ , torchify=a_ )
for image in image_inputs:
self.assertIsInstance(a_ , torch.Tensor )
# Test not batched input
__snake_case : Any = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
__snake_case : Tuple = image_processing(a_ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
| 24
| 1
|
"""simple docstring"""
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
is_abit_bnb_available,
is_abit_bnb_available,
is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
SCREAMING_SNAKE_CASE : Any = logging.getLogger(__name__)
def lowercase ( _snake_case : torch.nn.Module , _snake_case : BnbQuantizationConfig , _snake_case : Union[str, os.PathLike] = None , _snake_case : Optional[Dict[str, Union[int, str, torch.device]]] = None , _snake_case : Optional[List[str]] = None , _snake_case : Optional[Dict[Union[int, str], Union[int, str]]] = None , _snake_case : Optional[Union[str, os.PathLike]] = None , _snake_case : bool = False , ) ->Union[str, Any]:
"""simple docstring"""
    __snake_case : int = bnb_quantization_config.load_in_8bit
    __snake_case : Union[str, Any] = bnb_quantization_config.load_in_4bit
    if load_in_8bit and not is_8bit_bnb_available():
        raise ImportError(
            '''You have a version of `bitsandbytes` that is not compatible with 8bit quantization,'''
            ''' make sure you have the latest version of `bitsandbytes` installed.''' )
    if load_in_4bit and not is_4bit_bnb_available():
        raise ValueError(
            '''You have a version of `bitsandbytes` that is not compatible with 4bit quantization,'''
            ''' make sure you have the latest version of `bitsandbytes` installed.''' )
__snake_case : str = []
# custom device map
if isinstance(_snake_case , _snake_case ) and len(device_map.keys() ) > 1:
__snake_case : Any = [key for key, value in device_map.items() if value in ['''disk''', '''cpu''']]
# We keep some modules such as the lm_head in their original dtype for numerical stability reasons
if bnb_quantization_config.skip_modules is None:
__snake_case : List[str] = get_keys_to_not_convert(_snake_case )
# add cpu modules to skip modules only for 4-bit modules
    if load_in_4bit:
bnb_quantization_config.skip_modules.extend(_snake_case )
__snake_case : int = bnb_quantization_config.skip_modules
# We add the modules we want to keep in full precision
if bnb_quantization_config.keep_in_fpaa_modules is None:
__snake_case : int = []
__snake_case : str = bnb_quantization_config.keep_in_fpaa_modules
modules_to_not_convert.extend(_snake_case )
# compatibility with peft
    __snake_case : Dict = load_in_8bit
    __snake_case : Any = load_in_4bit
__snake_case : List[Any] = get_parameter_device(_snake_case )
if model_device.type != "meta":
# quantization of an already loaded model
logger.warning(
'''It is not recommended to quantize a loaded model. '''
'''The model should be instantiated under the `init_empty_weights` context manager.''' )
__snake_case : List[Any] = replace_with_bnb_layers(_snake_case , _snake_case , modules_to_not_convert=_snake_case )
# convert param to the right dtype
__snake_case : List[Any] = bnb_quantization_config.torch_dtype
        for name, param in model.state_dict().items():
            if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ):
                param.to(torch.float32 )
                if param.dtype != torch.float32:
                    __snake_case : Union[str, Any] = name.replace('''.weight''' , '''''' ).replace('''.bias''' , '''''' )
                    __snake_case : Optional[Any] = getattr(_snake_case , _snake_case , _snake_case )
                    if param is not None:
                        param.to(torch.float32 )
            elif torch.is_floating_point(_snake_case ):
                param.to(_snake_case )
if model_device.type == "cuda":
# move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
model.cuda(torch.cuda.current_device() )
torch.cuda.empty_cache()
elif torch.cuda.is_available():
model.to(torch.cuda.current_device() )
else:
raise RuntimeError('''No GPU found. A GPU is needed for quantization.''' )
logger.info(
f"""The model device type is {model_device.type}. However, cuda is needed for quantization."""
            ''' We move the model to cuda.''' )
return model
elif weights_location is None:
raise RuntimeError(
f"""`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} """ )
else:
with init_empty_weights():
__snake_case : List[str] = replace_with_bnb_layers(
_snake_case , _snake_case , modules_to_not_convert=_snake_case )
__snake_case : Optional[Any] = get_quantized_model_device_map(
_snake_case , _snake_case , _snake_case , max_memory=_snake_case , no_split_module_classes=_snake_case , )
if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
__snake_case : Dict = True
__snake_case : List[str] = any(x in list(device_map.values() ) for x in ['''cpu''', '''disk'''] )
load_checkpoint_in_model(
            _snake_case , _snake_case , _snake_case , dtype=bnb_quantization_config.torch_dtype , offload_folder=_snake_case , offload_state_dict=_snake_case , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_8bit_bnb=load_in_8bit and offload , )
return dispatch_model(_snake_case , device_map=_snake_case , offload_dir=_snake_case )
def lowercase ( _snake_case : Tuple , _snake_case : List[str] , _snake_case : Optional[int]=None , _snake_case : int=None , _snake_case : Optional[int]=None ) ->str:
"""simple docstring"""
if device_map is None:
if torch.cuda.is_available():
__snake_case : Dict = {'''''': torch.cuda.current_device()}
else:
raise RuntimeError('''No GPU found. A GPU is needed for quantization.''' )
        logger.info('''The device_map was not initialized.''' ''' Setting device_map to `{\'\':torch.cuda.current_device()}`.''' )
if isinstance(_snake_case , _snake_case ):
if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
raise ValueError(
'''If passing a string for `device_map`, please choose \'auto\', \'balanced\', \'balanced_low_0\' or '''
'''\'sequential\'.''' )
__snake_case : Any = {}
special_dtypes.update(
{
name: bnb_quantization_config.torch_dtype
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.skip_modules )
} )
special_dtypes.update(
{
name: torch.floataa
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules )
} )
__snake_case : List[Any] = {}
__snake_case : Optional[Any] = special_dtypes
__snake_case : Union[str, Any] = no_split_module_classes
__snake_case : Tuple = bnb_quantization_config.target_dtype
# get max_memory for each device.
if device_map != "sequential":
__snake_case : Any = get_balanced_memory(
_snake_case , low_zero=(device_map == '''balanced_low_0''') , max_memory=_snake_case , **_snake_case , )
__snake_case : Tuple = max_memory
__snake_case : Dict = infer_auto_device_map(_snake_case , **_snake_case )
if isinstance(_snake_case , _snake_case ):
        # check that we don't have any quantized module on the CPU
__snake_case : List[str] = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules
__snake_case : Any = {
key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
}
for device in ["cpu", "disk"]:
if device in device_map_without_some_modules.values():
                if bnb_quantization_config.load_in_4bit:
raise ValueError(
'''
Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit
the quantized model. If you want to dispatch the model on the CPU or the disk while keeping
these modules in `torch_dtype`, you need to pass a custom `device_map` to
`load_and_quantize_model`. Check
https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk
for more details.
''' )
else:
                    logger.info(
                        '''Some modules are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit''' )
del device_map_without_some_modules
return device_map
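# Illustrative note (assumption, following the docs link in the error above): a custom
# `device_map` that keeps the non-quantized modules on CPU or disk could look like
#     {'''transformer.word_embeddings''': 0, '''transformer.h''': '''cpu''', '''lm_head''': '''disk'''}
# where the module names are placeholders for the target architecture.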
def lowercase ( _snake_case : List[Any] , _snake_case : Any , _snake_case : Optional[Any]=None , _snake_case : Tuple=None ) ->int:
"""simple docstring"""
if modules_to_not_convert is None:
__snake_case : List[Any] = []
__snake_case , __snake_case : Any = _replace_with_bnb_layers(
_snake_case , _snake_case , _snake_case , _snake_case )
if not has_been_replaced:
logger.warning(
'''You are loading your model in 8bit or 4bit but no linear modules were found in your model.'''
            ''' This can happen for some architectures such as gpt2 that use Conv1D instead of Linear layers.'''
''' Please double check your model architecture, or submit an issue on github if you think this is'''
''' a bug.''' )
return model
def lowercase ( _snake_case : Optional[Any] , _snake_case : Dict , _snake_case : Tuple=None , _snake_case : Dict=None , ) ->Union[str, Any]:
"""simple docstring"""
__snake_case : List[str] = False
for name, module in model.named_children():
if current_key_name is None:
__snake_case : str = []
current_key_name.append(_snake_case )
if isinstance(_snake_case , nn.Linear ) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
__snake_case : str = '''.'''.join(_snake_case )
__snake_case : Any = True
for key in modules_to_not_convert:
if (
(key in current_key_name_str) and (key + "." in current_key_name_str)
) or key == current_key_name_str:
__snake_case : Dict = False
break
if proceed:
                # Load bnb module with empty weight and replace the `nn.Linear` module
                if bnb_quantization_config.load_in_8bit:
                    __snake_case : Union[str, Any] = bnb.nn.Linear8bitLt(
                        module.in_features , module.out_features , module.bias is not None , has_fp16_weights=_snake_case , threshold=bnb_quantization_config.llm_inta_threshold , )
                elif bnb_quantization_config.load_in_4bit:
                    __snake_case : Optional[Any] = bnb.nn.Linear4bit(
                        module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_abit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant , quant_type=bnb_quantization_config.bnb_abit_quant_type , )
else:
raise ValueError('''load_in_8bit and load_in_4bit can\'t be both False''' )
__snake_case : List[str] = module.weight.data
if module.bias is not None:
__snake_case : int = module.bias.data
bnb_module.requires_grad_(_snake_case )
setattr(_snake_case , _snake_case , _snake_case )
__snake_case : Tuple = True
if len(list(module.children() ) ) > 0:
__snake_case , __snake_case : Tuple = _replace_with_bnb_layers(
_snake_case , _snake_case , _snake_case , _snake_case )
__snake_case : List[str] = has_been_replaced | _has_been_replaced
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def lowercase ( _snake_case : Optional[int] ) ->Optional[int]:
"""simple docstring"""
with init_empty_weights():
__snake_case : Any = deepcopy(_snake_case ) # this has 0 cost since it is done inside `init_empty_weights` context manager`
__snake_case : Tuple = find_tied_parameters(_snake_case )
# For compatibility with Accelerate < 0.18
if isinstance(_snake_case , _snake_case ):
__snake_case : Tuple = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
else:
__snake_case : str = sum(_snake_case , [] )
__snake_case : Dict = len(_snake_case ) > 0
# Check if it is a base model
__snake_case : Tuple = False
if hasattr(_snake_case , '''base_model_prefix''' ):
__snake_case : Any = not hasattr(_snake_case , model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
__snake_case : List[Any] = list(model.named_children() )
__snake_case : Union[str, Any] = [list_modules[-1][0]]
# add last module together with tied weights
__snake_case : Optional[Any] = set(_snake_case ) - set(_snake_case )
__snake_case : List[Any] = list(set(_snake_case ) ) + list(_snake_case )
# remove ".weight" from the keys
__snake_case : List[str] = ['''.weight''', '''.bias''']
__snake_case : Any = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
__snake_case : Dict = name.replace(_snake_case , '''''' )
filtered_module_names.append(_snake_case )
return filtered_module_names
def lowercase ( _snake_case : str ) ->Optional[int]:
"""simple docstring"""
for m in model.modules():
        if isinstance(_snake_case , bnb.nn.Linear4bit ):
return True
return False
def lowercase ( _snake_case : nn.Module ) ->Optional[int]:
"""simple docstring"""
return next(parameter.parameters() ).device
def lowercase ( _snake_case : Optional[int] , _snake_case : List[Any] , _snake_case : Dict , _snake_case : str , _snake_case : int , _snake_case : Union[str, Any] , _snake_case : int ) ->int:
"""simple docstring"""
if fpaa_statistics is None:
set_module_tensor_to_device(_snake_case , _snake_case , 0 , dtype=_snake_case , value=_snake_case )
__snake_case : int = param_name
__snake_case : Dict = model
if "." in tensor_name:
__snake_case : Any = tensor_name.split('''.''' )
for split in splits[:-1]:
__snake_case : Optional[Any] = getattr(_snake_case , _snake_case )
if new_module is None:
raise ValueError(f"""{module} has no attribute {split}.""" )
__snake_case : Any = new_module
__snake_case : Tuple = splits[-1]
# offload weights
__snake_case : Dict = False
offload_weight(module._parameters[tensor_name] , _snake_case , _snake_case , index=_snake_case )
if hasattr(module._parameters[tensor_name] , '''SCB''' ):
offload_weight(
module._parameters[tensor_name].SCB , param_name.replace('''weight''' , '''SCB''' ) , _snake_case , index=_snake_case , )
else:
offload_weight(_snake_case , _snake_case , _snake_case , index=_snake_case )
offload_weight(_snake_case , param_name.replace('''weight''' , '''SCB''' ) , _snake_case , index=_snake_case )
set_module_tensor_to_device(_snake_case , _snake_case , '''meta''' , dtype=_snake_case , value=torch.empty(*param.size() ) )
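# --- Hedged usage sketch (illustrative addition, not part of the original module) ---
# Assuming the public `accelerate` entry points `BnbQuantizationConfig` and
# `load_and_quantize_model` behave as documented, an 8-bit load could look like the
# following; the model id and the checkpoint path are placeholder assumptions.
def _example_load_and_quantize_sketch():
    from accelerate import init_empty_weights
    from accelerate.utils import BnbQuantizationConfig, load_and_quantize_model
    from transformers import AutoConfig, AutoModelForCausalLM

    # Build an empty (meta-device) model so quantization happens while loading weights.
    config = AutoConfig.from_pretrained('''EleutherAI/gpt-neo-125m''' )
    with init_empty_weights():
        empty_model = AutoModelForCausalLM.from_config(config )
    bnb_config = BnbQuantizationConfig(load_in_8bit=True , llm_int8_threshold=6.0 )
    return load_and_quantize_model(
        empty_model , bnb_quantization_config=bnb_config , weights_location='''/path/to/checkpoint''' , device_map='''auto''' , )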
| 24
|
"""simple docstring"""
import json
import os
import tempfile
from unittest.mock import patch
import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import DistributedType, infer_auto_device_map, init_empty_weights
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState, PartialState
from accelerate.test_utils import require_bnb, require_multi_gpu, slow
from accelerate.test_utils.testing import AccelerateTestCase, require_cuda
from accelerate.utils import patch_environment
def lowercase ( ) ->Optional[int]:
"""simple docstring"""
__snake_case : int = torch.nn.Linear(2 , 4 )
__snake_case : Optional[Any] = torch.optim.AdamW(model.parameters() , lr=1.0 )
__snake_case : Optional[Any] = torch.optim.lr_scheduler.OneCycleLR(_snake_case , max_lr=0.01 , steps_per_epoch=2 , epochs=1 )
__snake_case : List[str] = DataLoader(TensorDataset(torch.tensor([1, 2, 3] ) ) )
__snake_case : Dict = DataLoader(TensorDataset(torch.tensor([4, 5, 6] ) ) )
return model, optimizer, scheduler, train_dl, valid_dl
def lowercase ( _snake_case : str ) ->Optional[Any]:
"""simple docstring"""
return (model.weight.abs().sum() + model.bias.abs().sum()).item()
def lowercase ( _snake_case : Union[str, Any] ) ->Tuple:
"""simple docstring"""
__snake_case : Dict = torch.nn.Linear(*tuple(model.weight.T.shape ) ).state_dict()
model.load_state_dict(_snake_case )
class _UpperCAmelCase ( __snake_case ):
'''simple docstring'''
@require_cuda
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[int] = Accelerator()
assert PartialState._shared_state["_cpu"] is False
assert PartialState._shared_state["device"].type == "cuda"
with self.assertRaises(a_ ):
__snake_case : Any = Accelerator(cpu=a_ )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[int] = Accelerator()
__snake_case : Optional[int] = GradientState()
assert state.num_steps == 1
__snake_case : str = 4
assert state.num_steps == 4
assert state.sync_gradients is True
__snake_case : List[Any] = False
assert state.sync_gradients is False
GradientState._reset_state()
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[Any] = Accelerator()
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case : Optional[Any] = create_components()
(
(
__snake_case
) , (
__snake_case
) , (
__snake_case
) , (
__snake_case
) , (
__snake_case
) ,
) : Union[str, Any] = accelerator.prepare(a_ , a_ , a_ , a_ , a_ )
self.assertTrue(prepared_model in accelerator._models )
self.assertTrue(prepared_optimizer in accelerator._optimizers )
self.assertTrue(prepared_scheduler in accelerator._schedulers )
self.assertTrue(prepared_train_dl in accelerator._dataloaders )
self.assertTrue(prepared_valid_dl in accelerator._dataloaders )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Tuple = Accelerator()
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case : Union[str, Any] = create_components()
accelerator.prepare(a_ , a_ , a_ , a_ , a_ )
accelerator.free_memory()
self.assertTrue(len(accelerator._models ) == 0 )
self.assertTrue(len(accelerator._optimizers ) == 0 )
self.assertTrue(len(accelerator._schedulers ) == 0 )
self.assertTrue(len(accelerator._dataloaders ) == 0 )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
PartialState._reset_state()
# Mock torch.cuda.set_device to avoid an exception as the device doesn't exist
def noop(*a_ , **a_ ):
pass
with patch('''torch.cuda.set_device''' , a_ ), patch_environment(ACCELERATE_TORCH_DEVICE='''cuda:64''' ):
__snake_case : List[Any] = Accelerator()
self.assertEqual(str(accelerator.state.device ) , '''cuda:64''' )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[int] = Accelerator()
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case : str = create_components()
accelerator.prepare(a_ , a_ , a_ , a_ , a_ )
__snake_case : Any = get_signature(a_ )
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(a_ )
# make sure random weights don't match
load_random_weights(a_ )
self.assertTrue(abs(model_signature - get_signature(a_ ) ) > 1E-3 )
# make sure loaded weights match
accelerator.load_state(a_ )
self.assertTrue(abs(model_signature - get_signature(a_ ) ) < 1E-3 )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : int = Accelerator()
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case : str = create_components()
accelerator.prepare(a_ , a_ , a_ , a_ , a_ )
__snake_case : List[Any] = get_signature(a_ )
# saving hook
def save_config(a_ , a_ , a_ ):
__snake_case : Optional[Any] = {'''class_name''': models[0].__class__.__name__}
with open(os.path.join(a_ , '''data.json''' ) , '''w''' ) as f:
json.dump(a_ , a_ )
# loading hook
def load_config(a_ , a_ ):
with open(os.path.join(a_ , '''data.json''' ) , '''r''' ) as f:
__snake_case : Any = json.load(a_ )
__snake_case : List[str] = config['''class_name''']
__snake_case : str = accelerator.register_save_state_pre_hook(a_ )
__snake_case : Union[str, Any] = accelerator.register_load_state_pre_hook(a_ )
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(a_ )
# make sure random weights don't match with hooks
load_random_weights(a_ )
self.assertTrue(abs(model_signature - get_signature(a_ ) ) > 1E-3 )
# random class name to verify correct one is loaded
__snake_case : Any = '''random'''
# make sure loaded weights match with hooks
accelerator.load_state(a_ )
self.assertTrue(abs(model_signature - get_signature(a_ ) ) < 1E-3 )
            # model.class_name is loaded from config
self.assertTrue(model.class_name == model.__class__.__name__ )
# remove hooks
save_hook.remove()
load_hook.remove()
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(a_ )
# make sure random weights don't match with hooks removed
load_random_weights(a_ )
self.assertTrue(abs(model_signature - get_signature(a_ ) ) > 1E-3 )
# random class name to verify correct one is loaded
__snake_case : Union[str, Any] = '''random'''
# make sure loaded weights match with hooks removed
accelerator.load_state(a_ )
self.assertTrue(abs(model_signature - get_signature(a_ ) ) < 1E-3 )
            # model.class_name is NOT loaded from config
self.assertTrue(model.class_name != model.__class__.__name__ )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : List[Any] = Accelerator()
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case : Tuple = create_components()
__snake_case : Union[str, Any] = None
# This should work
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case : Tuple = accelerator.prepare(
a_ , a_ , a_ , a_ , a_ , a_ )
self.assertTrue(dummy_obj is None )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : str = Accelerator()
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case : Optional[Any] = create_components()
__snake_case : Optional[int] = [1, 2, 3]
# This should work
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case : str = accelerator.prepare(
a_ , a_ , a_ , a_ , a_ , a_ )
self.assertEqual(
getattr(a_ , '''_is_accelerate_prepared''' , a_ ) , a_ , '''Dummy object should have `_is_accelerate_prepared` set to `True`''' , )
self.assertEqual(
getattr(a_ , '''_is_accelerate_prepared''' , a_ ) , a_ , '''Model is missing `_is_accelerator_prepared` or is set to `False`''' , )
self.assertEqual(
getattr(a_ , '''_is_accelerate_prepared''' , a_ ) , a_ , '''Optimizer is missing `_is_accelerator_prepared` or is set to `False`''' , )
self.assertEqual(
getattr(a_ , '''_is_accelerate_prepared''' , a_ ) , a_ , '''Scheduler is missing `_is_accelerator_prepared` or is set to `False`''' , )
self.assertEqual(
getattr(a_ , '''_is_accelerate_prepared''' , a_ ) , a_ , '''Train Dataloader is missing `_is_accelerator_prepared` or is set to `False`''' , )
self.assertEqual(
getattr(a_ , '''_is_accelerate_prepared''' , a_ ) , a_ , '''Valid Dataloader is missing `_is_accelerator_prepared` or is set to `False`''' , )
@slow
@require_bnb
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
from transformers import AutoModelForCausalLM
__snake_case : Dict = AutoModelForCausalLM.from_pretrained(
            '''EleutherAI/gpt-neo-125m''' , load_in_8bit=a_ , device_map={'''''': 0} , )
__snake_case : Optional[Any] = Accelerator()
# This should work
__snake_case : Any = accelerator.prepare(a_ )
@slow
@require_bnb
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
from transformers import AutoModelForCausalLM
__snake_case : Any = Accelerator()
with init_empty_weights():
__snake_case : List[str] = AutoModelForCausalLM.from_pretrained(
'''EleutherAI/gpt-neo-125m''' , )
model.tie_weights()
__snake_case : Union[str, Any] = infer_auto_device_map(a_ )
__snake_case : str = '''cpu'''
__snake_case : Optional[int] = AutoModelForCausalLM.from_pretrained(
            '''EleutherAI/gpt-neo-125m''' , device_map=a_ , load_in_8bit=a_ , llm_int8_enable_fp32_cpu_offload=a_ )
# This should not work and get value error
with self.assertRaises(a_ ):
__snake_case : Dict = accelerator.prepare(a_ )
@slow
@require_bnb
@require_multi_gpu
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
from transformers import AutoModelForCausalLM
__snake_case : str = {'''distributed_type''': DistributedType.MULTI_GPU}
with init_empty_weights():
__snake_case : Any = AutoModelForCausalLM.from_pretrained(
'''EleutherAI/gpt-neo-125m''' , )
model.tie_weights()
__snake_case : List[Any] = infer_auto_device_map(a_ )
__snake_case : Dict = 1
__snake_case : str = AutoModelForCausalLM.from_pretrained(
            '''EleutherAI/gpt-neo-125m''' , load_in_8bit=a_ , device_map=a_ , )
__snake_case : Any = Accelerator()
# This should not work and get value error
with self.assertRaises(a_ ):
__snake_case : Tuple = accelerator.prepare(a_ )
PartialState._reset_state()
@slow
@require_bnb
@require_multi_gpu
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
from transformers import AutoModelForCausalLM
with init_empty_weights():
__snake_case : Dict = AutoModelForCausalLM.from_pretrained(
'''EleutherAI/gpt-neo-125m''' , )
__snake_case : Tuple = infer_auto_device_map(a_ )
__snake_case : Tuple = 1
__snake_case : List[Any] = AutoModelForCausalLM.from_pretrained(
            '''EleutherAI/gpt-neo-125m''' , load_in_8bit=a_ , device_map=a_ , )
__snake_case : Tuple = Accelerator()
# This should work
__snake_case : Dict = accelerator.prepare(a_ )
@require_cuda
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : List[Any] = torch.nn.Linear(10 , 10 )
__snake_case : List[str] = torch.optim.SGD(model.parameters() , lr=0.01 )
__snake_case : Optional[Any] = Accelerator(cpu=a_ )
__snake_case : str = accelerator.prepare(a_ )
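# Note (illustrative): these cases run under pytest like the rest of the accelerate
# test suite, e.g. `python -m pytest -k "bnb" <path-to-this-file>`; the filter and
# path are placeholders, and the bnb/multi-GPU cases are gated by the decorators above.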
| 24
| 1
|
"""simple docstring"""
import logging
import os
import threading
import time
try:
import warnings
except ImportError:
SCREAMING_SNAKE_CASE : Tuple = None
try:
import msvcrt
except ImportError:
SCREAMING_SNAKE_CASE : List[str] = None
try:
import fcntl
except ImportError:
SCREAMING_SNAKE_CASE : Tuple = None
# Backward compatibility
# ------------------------------------------------
try:
TimeoutError
except NameError:
SCREAMING_SNAKE_CASE : List[str] = OSError
# Data
# ------------------------------------------------
SCREAMING_SNAKE_CASE : List[Any] = [
"""Timeout""",
"""BaseFileLock""",
"""WindowsFileLock""",
"""UnixFileLock""",
"""SoftFileLock""",
"""FileLock""",
]
SCREAMING_SNAKE_CASE : List[Any] = """3.0.12"""
SCREAMING_SNAKE_CASE : int = None
def lowercase ( ) ->str:
"""simple docstring"""
global _logger
__snake_case : Union[str, Any] = _logger or logging.getLogger(__name__ )
return _logger
class _UpperCAmelCase ( __snake_case ):
'''simple docstring'''
def __init__(self , a_ ):
'''simple docstring'''
__snake_case : Optional[int] = lock_file
return None
def __str__(self ):
'''simple docstring'''
__snake_case : Tuple = f"""The file lock '{self.lock_file}' could not be acquired."""
return temp
class _UpperCAmelCase :
'''simple docstring'''
def __init__(self , a_ ):
'''simple docstring'''
__snake_case : Optional[Any] = lock
return None
def __enter__(self ):
'''simple docstring'''
return self.lock
def __exit__(self , a_ , a_ , a_ ):
'''simple docstring'''
self.lock.release()
return None
class _UpperCAmelCase :
'''simple docstring'''
def __init__(self , a_ , a_=-1 , a_=None ):
'''simple docstring'''
__snake_case : List[Any] = max_filename_length if max_filename_length is not None else 2_55
# Hash the filename if it's too long
__snake_case : Dict = self.hash_filename_if_too_long(a_ , a_ )
# The path to the lock file.
__snake_case : str = lock_file
# The file descriptor for the *_lock_file* as it is returned by the
# os.open() function.
# This file lock is only NOT None, if the object currently holds the
# lock.
__snake_case : Dict = None
# The default timeout value.
__snake_case : List[Any] = timeout
# We use this lock primarily for the lock counter.
__snake_case : Tuple = threading.Lock()
# The lock counter is used for implementing the nested locking
# mechanism. Whenever the lock is acquired, the counter is increased and
# the lock is only released, when this value is 0 again.
__snake_case : Optional[Any] = 0
return None
@property
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return self._lock_file
@property
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return self._timeout
@timeout.setter
def SCREAMING_SNAKE_CASE (self , a_ ):
'''simple docstring'''
__snake_case : Dict = float(a_ )
return None
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
raise NotImplementedError()
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
raise NotImplementedError()
@property
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return self._lock_file_fd is not None
def SCREAMING_SNAKE_CASE (self , a_=None , a_=0.05 ):
'''simple docstring'''
if timeout is None:
__snake_case : List[str] = self.timeout
# Increment the number right at the beginning.
# We can still undo it, if something fails.
with self._thread_lock:
self._lock_counter += 1
__snake_case : Optional[int] = id(self )
__snake_case : str = self._lock_file
__snake_case : Optional[int] = time.time()
try:
while True:
with self._thread_lock:
if not self.is_locked:
logger().debug(f"""Attempting to acquire lock {lock_id} on {lock_filename}""" )
self._acquire()
if self.is_locked:
logger().debug(f"""Lock {lock_id} acquired on {lock_filename}""" )
break
elif timeout >= 0 and time.time() - start_time > timeout:
logger().debug(f"""Timeout on acquiring lock {lock_id} on {lock_filename}""" )
raise Timeout(self._lock_file )
else:
logger().debug(
f"""Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ...""" )
time.sleep(a_ )
except: # noqa
# Something did go wrong, so decrement the counter.
with self._thread_lock:
__snake_case : Optional[int] = max(0 , self._lock_counter - 1 )
raise
return _Acquire_ReturnProxy(lock=self )
def SCREAMING_SNAKE_CASE (self , a_=False ):
'''simple docstring'''
with self._thread_lock:
if self.is_locked:
self._lock_counter -= 1
if self._lock_counter == 0 or force:
__snake_case : Tuple = id(self )
__snake_case : str = self._lock_file
logger().debug(f"""Attempting to release lock {lock_id} on {lock_filename}""" )
self._release()
__snake_case : Dict = 0
logger().debug(f"""Lock {lock_id} released on {lock_filename}""" )
return None
def __enter__(self ):
'''simple docstring'''
self.acquire()
return self
def __exit__(self , a_ , a_ , a_ ):
'''simple docstring'''
self.release()
return None
def __del__(self ):
'''simple docstring'''
self.release(force=a_ )
return None
def SCREAMING_SNAKE_CASE (self , a_ , a_ ):
'''simple docstring'''
__snake_case : Any = os.path.basename(a_ )
if len(a_ ) > max_length and max_length > 0:
__snake_case : List[Any] = os.path.dirname(a_ )
__snake_case : Any = str(hash(a_ ) )
__snake_case : List[Any] = filename[: max_length - len(a_ ) - 8] + '''...''' + hashed_filename + '''.lock'''
return os.path.join(a_ , a_ )
else:
return path
class _UpperCAmelCase ( __snake_case ):
'''simple docstring'''
def __init__(self , a_ , a_=-1 , a_=None ):
'''simple docstring'''
from .file_utils import relative_to_absolute_path
super().__init__(a_ , timeout=a_ , max_filename_length=a_ )
__snake_case : List[str] = '''\\\\?\\''' + relative_to_absolute_path(self.lock_file )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[Any] = os.O_RDWR | os.O_CREAT | os.O_TRUNC
try:
__snake_case : Any = os.open(self._lock_file , a_ )
except OSError:
pass
else:
try:
msvcrt.locking(a_ , msvcrt.LK_NBLCK , 1 )
except OSError:
os.close(a_ )
else:
__snake_case : Dict = fd
return None
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Dict = self._lock_file_fd
__snake_case : Dict = None
msvcrt.locking(a_ , msvcrt.LK_UNLCK , 1 )
os.close(a_ )
try:
os.remove(self._lock_file )
# Probably another instance of the application
# that acquired the file lock.
except OSError:
pass
return None
class _UpperCAmelCase ( __snake_case ):
'''simple docstring'''
def __init__(self , a_ , a_=-1 , a_=None ):
'''simple docstring'''
__snake_case : Optional[Any] = os.statvfs(os.path.dirname(a_ ) ).f_namemax
super().__init__(a_ , timeout=a_ , max_filename_length=a_ )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[Any] = os.O_RDWR | os.O_CREAT | os.O_TRUNC
__snake_case : List[str] = os.open(self._lock_file , a_ )
try:
fcntl.flock(a_ , fcntl.LOCK_EX | fcntl.LOCK_NB )
except OSError:
os.close(a_ )
else:
__snake_case : Optional[int] = fd
return None
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Dict = self._lock_file_fd
__snake_case : Tuple = None
fcntl.flock(a_ , fcntl.LOCK_UN )
os.close(a_ )
return None
class _UpperCAmelCase ( __snake_case ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Union[str, Any] = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
try:
__snake_case : Tuple = os.open(self._lock_file , a_ )
except OSError:
pass
else:
__snake_case : List[Any] = fd
return None
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
os.close(self._lock_file_fd )
__snake_case : int = None
try:
os.remove(self._lock_file )
# The file is already deleted and that's what we want.
except OSError:
pass
return None
SCREAMING_SNAKE_CASE : Dict = None
if msvcrt:
SCREAMING_SNAKE_CASE : List[Any] = WindowsFileLock
elif fcntl:
SCREAMING_SNAKE_CASE : List[str] = UnixFileLock
else:
SCREAMING_SNAKE_CASE : str = SoftFileLock
if warnings is not None:
warnings.warn("""only soft file lock is available""")
| 24
|
"""simple docstring"""
def lowercase ( _snake_case : int ) ->str:
"""simple docstring"""
if number > 0:
raise ValueError('''input must be a negative integer''' )
__snake_case : Any = len(bin(_snake_case )[3:] )
__snake_case : List[Any] = bin(abs(_snake_case ) - (1 << binary_number_length) )[3:]
__snake_case : Dict = (
(
'''1'''
+ '''0''' * (binary_number_length - len(_snake_case ))
+ twos_complement_number
)
if number < 0
else '''0'''
)
return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
| 24
| 1
|
"""simple docstring"""
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _UpperCAmelCase ( __snake_case, unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ =LongformerTokenizer
lowerCamelCase__ =True
lowerCamelCase__ =LongformerTokenizerFast
lowerCamelCase__ =True
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
__snake_case : List[Any] = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
__snake_case : Union[str, Any] = dict(zip(a_ , range(len(a_ ) ) ) )
__snake_case : List[str] = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
__snake_case : List[Any] = {'''unk_token''': '''<unk>'''}
__snake_case : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
__snake_case : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(a_ ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(a_ ) )
def SCREAMING_SNAKE_CASE (self , **a_ ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **a_ )
def SCREAMING_SNAKE_CASE (self , **a_ ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **a_ )
def SCREAMING_SNAKE_CASE (self , a_ ):
'''simple docstring'''
__snake_case : str = '''lower newer'''
__snake_case : int = '''lower newer'''
return input_text, output_text
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Dict = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
__snake_case : Any = '''lower newer'''
__snake_case : Optional[Any] = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
__snake_case : List[str] = tokenizer.tokenize(a_ ) # , add_prefix_space=True)
self.assertListEqual(a_ , a_ )
__snake_case : List[str] = tokens + [tokenizer.unk_token]
__snake_case : int = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(a_ ) , a_ )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : int = self.get_tokenizer()
self.assertListEqual(tokenizer.encode('''Hello world!''' , add_special_tokens=a_ ) , [0, 3_14_14, 2_32, 3_28, 2] )
self.assertListEqual(
tokenizer.encode('''Hello world! cécé herlolip 418''' , add_special_tokens=a_ ) , [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2] , )
@slow
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Dict = self.tokenizer_class.from_pretrained('''allenai/longformer-base-4096''' )
__snake_case : List[Any] = tokenizer.encode('''sequence builders''' , add_special_tokens=a_ )
__snake_case : Any = tokenizer.encode('''multi-sequence build''' , add_special_tokens=a_ )
__snake_case : int = tokenizer.encode(
'''sequence builders''' , add_special_tokens=a_ , add_prefix_space=a_ )
__snake_case : int = tokenizer.encode(
'''sequence builders''' , '''multi-sequence build''' , add_special_tokens=a_ , add_prefix_space=a_ )
__snake_case : Optional[int] = tokenizer.build_inputs_with_special_tokens(a_ )
__snake_case : str = tokenizer.build_inputs_with_special_tokens(a_ , a_ )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[Any] = self.get_tokenizer()
__snake_case : Optional[int] = '''Encode this sequence.'''
__snake_case : int = tokenizer.byte_encoder[''' '''.encode('''utf-8''' )[0]]
# Testing encoder arguments
__snake_case : List[str] = tokenizer.encode(a_ , add_special_tokens=a_ , add_prefix_space=a_ )
__snake_case : Dict = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(a_ , a_ )
__snake_case : List[Any] = tokenizer.encode(a_ , add_special_tokens=a_ , add_prefix_space=a_ )
__snake_case : Dict = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(a_ , a_ )
tokenizer.add_special_tokens({'''bos_token''': '''<s>'''} )
__snake_case : Optional[int] = tokenizer.encode(a_ , add_special_tokens=a_ )
__snake_case : Optional[Any] = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(a_ , a_ )
# Testing spaces after special tokens
__snake_case : List[str] = '''<mask>'''
tokenizer.add_special_tokens(
{'''mask_token''': AddedToken(a_ , lstrip=a_ , rstrip=a_ )} ) # mask token has a left space
__snake_case : Tuple = tokenizer.convert_tokens_to_ids(a_ )
__snake_case : int = '''Encode <mask> sequence'''
__snake_case : Optional[int] = '''Encode <mask>sequence'''
__snake_case : str = tokenizer.encode(a_ )
__snake_case : Any = encoded.index(a_ )
__snake_case : Optional[Any] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(a_ , a_ )
__snake_case : Optional[int] = tokenizer.encode(a_ )
__snake_case : Optional[Any] = encoded.index(a_ )
__snake_case : int = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(a_ , a_ )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
__snake_case : List[Any] = self.rust_tokenizer_class.from_pretrained(a_ , **a_ )
__snake_case : str = self.tokenizer_class.from_pretrained(a_ , **a_ )
__snake_case : Tuple = '''A, <mask> AllenNLP sentence.'''
__snake_case : Union[str, Any] = tokenizer_r.encode_plus(a_ , add_special_tokens=a_ , return_token_type_ids=a_ )
__snake_case : str = tokenizer_p.encode_plus(a_ , add_special_tokens=a_ , return_token_type_ids=a_ )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , )
__snake_case : Any = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] )
__snake_case : Optional[Any] = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] )
                # Rust correctly handles the space before the mask while Python doesn't
self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(
a_ , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
self.assertSequenceEqual(
a_ , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
__snake_case : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=a_ , add_prefix_space=a_ , trim_offsets=a_ )
__snake_case : Dict = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
__snake_case : int = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state['''add_prefix_space'''] , a_ )
self.assertEqual(post_processor_state['''add_prefix_space'''] , a_ )
self.assertEqual(post_processor_state['''trim_offsets'''] , a_ )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
__snake_case : List[Any] = '''hello''' # `hello` is a token in the vocabulary of `pretrained_name`
__snake_case : List[str] = f"""{text_of_1_token} {text_of_1_token}"""
__snake_case : str = self.rust_tokenizer_class.from_pretrained(
a_ , use_fast=a_ , add_prefix_space=a_ , trim_offsets=a_ )
__snake_case : List[Any] = tokenizer_r(a_ , return_offsets_mapping=a_ , add_special_tokens=a_ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(a_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(a_ ) + 1, len(a_ ) + 1 + len(a_ )) , )
__snake_case : List[Any] = self.rust_tokenizer_class.from_pretrained(
a_ , use_fast=a_ , add_prefix_space=a_ , trim_offsets=a_ )
__snake_case : Any = tokenizer_r(a_ , return_offsets_mapping=a_ , add_special_tokens=a_ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(a_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(a_ ) + 1, len(a_ ) + 1 + len(a_ )) , )
__snake_case : int = self.rust_tokenizer_class.from_pretrained(
a_ , use_fast=a_ , add_prefix_space=a_ , trim_offsets=a_ )
__snake_case : Optional[int] = tokenizer_r(a_ , return_offsets_mapping=a_ , add_special_tokens=a_ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(a_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(a_ ), len(a_ ) + 1 + len(a_ )) , )
__snake_case : Any = self.rust_tokenizer_class.from_pretrained(
a_ , use_fast=a_ , add_prefix_space=a_ , trim_offsets=a_ )
__snake_case : Optional[int] = tokenizer_r(a_ , return_offsets_mapping=a_ , add_special_tokens=a_ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(a_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(a_ ), len(a_ ) + 1 + len(a_ )) , )
__snake_case : List[Any] = f""" {text}"""
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
__snake_case : List[str] = self.rust_tokenizer_class.from_pretrained(
a_ , use_fast=a_ , add_prefix_space=a_ , trim_offsets=a_ )
__snake_case : Tuple = tokenizer_r(a_ , return_offsets_mapping=a_ , add_special_tokens=a_ )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(a_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(a_ ) + 1, 1 + len(a_ ) + 1 + len(a_ )) , )
__snake_case : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(
a_ , use_fast=a_ , add_prefix_space=a_ , trim_offsets=a_ )
__snake_case : Optional[Any] = tokenizer_r(a_ , return_offsets_mapping=a_ , add_special_tokens=a_ )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(a_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(a_ ), 1 + len(a_ ) + 1 + len(a_ )) , )
__snake_case : Tuple = self.rust_tokenizer_class.from_pretrained(
a_ , use_fast=a_ , add_prefix_space=a_ , trim_offsets=a_ )
__snake_case : int = tokenizer_r(a_ , return_offsets_mapping=a_ , add_special_tokens=a_ )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(a_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(a_ ), 1 + len(a_ ) + 1 + len(a_ )) , )
| 24
|
"""simple docstring"""
def lowercase ( ) ->int:
"""simple docstring"""
return [
a * b * (1_000 - a - b)
for a in range(1 , 999 )
        for b in range(a , 999 )
if (a * a + b * b == (1_000 - a - b) ** 2)
][0]
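# Illustrative check: the unique Pythagorean triplet with a + b + c == 1_000 is
# (200, 375, 425), so the comprehension above yields 200 * 375 * 425 == 31_875_000.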
if __name__ == "__main__":
print(F'{solution() = }')
| 24
| 1
|
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def lowercase ( _snake_case : Optional[Any] ) ->str:
"""simple docstring"""
__snake_case : Optional[int] = [2, 2, 6, 2] if '''tiny''' in model_name else [2, 2, 18, 2]
__snake_case : List[Any] = True if '''large''' in model_name or '''huge''' in model_name else False
__snake_case : str = True if '''large''' in model_name or '''huge''' in model_name else False
__snake_case : int = True if '''large''' in model_name or '''huge''' in model_name else False
if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
if "fl3" in model_name:
__snake_case : Optional[Any] = [3, 3, 3, 3]
__snake_case : Optional[Any] = [5, 5, 5, 5]
elif "fl4" in model_name:
__snake_case : Optional[Any] = [4, 4, 4, 4]
__snake_case : Any = [3, 3, 3, 3]
if "tiny" in model_name or "small" in model_name or "base" in model_name:
__snake_case : List[Any] = [3, 3, 3, 3]
if "lrf" in model_name:
__snake_case : Dict = [3, 3, 3, 3]
else:
__snake_case : int = [2, 2, 2, 2]
if "tiny" in model_name:
__snake_case : Dict = 96
elif "small" in model_name:
__snake_case : Dict = 96
elif "base" in model_name:
__snake_case : Dict = 128
elif "large" in model_name:
__snake_case : int = 192
elif "xlarge" in model_name:
__snake_case : int = 256
elif "huge" in model_name:
__snake_case : Tuple = 352
# set label information
__snake_case : Optional[int] = '''huggingface/label-files'''
if "large" in model_name or "huge" in model_name:
__snake_case : List[Any] = '''imagenet-22k-id2label.json'''
else:
__snake_case : str = '''imagenet-1k-id2label.json'''
__snake_case : Optional[Any] = json.load(open(hf_hub_download(_snake_case , _snake_case , repo_type='''dataset''' ) , '''r''' ) )
    __snake_case : Tuple = {int(k ): v for k, v in idalabel.items()}
__snake_case : Union[str, Any] = {v: k for k, v in idalabel.items()}
__snake_case : Union[str, Any] = FocalNetConfig(
        embed_dim=_snake_case , depths=_snake_case , focal_levels=_snake_case , focal_windows=_snake_case , use_conv_embed=_snake_case , id2label=_snake_case , label2id=_snake_case , use_post_layernorm=_snake_case , use_layerscale=_snake_case , )
return config
def lowercase ( _snake_case : Optional[Any] ) ->List[str]:
"""simple docstring"""
if "patch_embed.proj" in name:
__snake_case : Optional[int] = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
if "patch_embed.norm" in name:
__snake_case : List[Any] = name.replace('''patch_embed.norm''' , '''embeddings.norm''' )
if "layers" in name:
__snake_case : List[str] = '''encoder.''' + name
if "encoder.layers" in name:
__snake_case : str = name.replace('''encoder.layers''' , '''encoder.stages''' )
if "downsample.proj" in name:
__snake_case : Tuple = name.replace('''downsample.proj''' , '''downsample.projection''' )
if "blocks" in name:
__snake_case : Dict = name.replace('''blocks''' , '''layers''' )
if "modulation.f.weight" in name or "modulation.f.bias" in name:
__snake_case : Optional[int] = name.replace('''modulation.f''' , '''modulation.projection_in''' )
if "modulation.h.weight" in name or "modulation.h.bias" in name:
__snake_case : Tuple = name.replace('''modulation.h''' , '''modulation.projection_context''' )
if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
__snake_case : int = name.replace('''modulation.proj''' , '''modulation.projection_out''' )
if name == "norm.weight":
__snake_case : str = '''layernorm.weight'''
if name == "norm.bias":
__snake_case : int = '''layernorm.bias'''
if "head" in name:
__snake_case : Optional[Any] = name.replace('''head''' , '''classifier''' )
else:
__snake_case : Any = '''focalnet.''' + name
return name
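# Illustrative trace of the rules above: a checkpoint key such as
# '''layers.0.blocks.1.modulation.f.weight''' is rewritten step by step to
# '''focalnet.encoder.stages.0.layers.1.modulation.projection_in.weight'''.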
def lowercase ( _snake_case : Any , _snake_case : Optional[Any] , _snake_case : str=False ) ->Dict:
"""simple docstring"""
    # fmt: off
    __snake_case : Any = {
'''focalnet-tiny''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth''',
'''focalnet-tiny-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth''',
'''focalnet-small''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth''',
'''focalnet-small-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth''',
'''focalnet-base''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth''',
'''focalnet-base-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth''',
'''focalnet-large-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth''',
'''focalnet-large-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth''',
'''focalnet-xlarge-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth''',
'''focalnet-xlarge-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth''',
}
# fmt: on
__snake_case : Union[str, Any] = model_name_to_url[model_name]
print('''Checkpoint URL: ''' , _snake_case )
__snake_case : Any = torch.hub.load_state_dict_from_url(_snake_case , map_location='''cpu''' )['''model''']
# rename keys
for key in state_dict.copy().keys():
__snake_case : Dict = state_dict.pop(_snake_case )
__snake_case : Dict = val
__snake_case : int = get_focalnet_config(_snake_case )
__snake_case : List[Any] = FocalNetForImageClassification(_snake_case )
model.eval()
# load state dict
model.load_state_dict(_snake_case )
# verify conversion
__snake_case : int = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
__snake_case : Dict = BitImageProcessor(
        do_resize=_snake_case , size={'''shortest_edge''': 256} , resample=PILImageResampling.BILINEAR , do_center_crop=_snake_case , crop_size=224 , do_normalize=_snake_case , image_mean=IMAGENET_DEFAULT_MEAN , image_std=IMAGENET_DEFAULT_STD , )
__snake_case : Optional[int] = Image.open(requests.get(_snake_case , stream=_snake_case ).raw )
__snake_case : Tuple = processor(images=_snake_case , return_tensors='''pt''' )
__snake_case : List[str] = transforms.Compose(
[
transforms.Resize(256 ),
transforms.CenterCrop(224 ),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ),
] )
__snake_case : str = image_transforms(_snake_case ).unsqueeze(0 )
# verify pixel_values
assert torch.allclose(inputs.pixel_values , _snake_case , atol=1e-4 )
__snake_case : Any = model(**_snake_case )
__snake_case : Dict = outputs.logits.argmax(-1 ).item()
    print('''Predicted class:''' , model.config.id2label[predicted_class_idx] )
print('''First values of logits:''' , outputs.logits[0, :3] )
if model_name == "focalnet-tiny":
__snake_case : Any = torch.tensor([0.2166, -0.4368, 0.2191] )
elif model_name == "focalnet-tiny-lrf":
__snake_case : Any = torch.tensor([1.1669, 0.0125, -0.1695] )
elif model_name == "focalnet-small":
__snake_case : Dict = torch.tensor([0.4917, -0.0430, 0.1341] )
elif model_name == "focalnet-small-lrf":
__snake_case : Union[str, Any] = torch.tensor([-0.2588, -0.5342, -0.2331] )
elif model_name == "focalnet-base":
__snake_case : Optional[int] = torch.tensor([-0.1655, -0.4090, -0.1730] )
elif model_name == "focalnet-base-lrf":
__snake_case : Dict = torch.tensor([0.5306, -0.0483, -0.3928] )
assert torch.allclose(outputs.logits[0, :3] , _snake_case , atol=1e-4 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(f"""Saving model and processor of {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(_snake_case )
processor.save_pretrained(_snake_case )
if push_to_hub:
print(f"""Pushing model and processor of {model_name} to the hub...""" )
model.push_to_hub(f"""{model_name}""" )
processor.push_to_hub(f"""{model_name}""" )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""focalnet-tiny""",
type=str,
help="""Name of the FocalNet model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model and processor to the hub.""",
)
SCREAMING_SNAKE_CASE : List[str] = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 24
|
"""simple docstring"""
def lowercase ( _snake_case : int = 100 ) ->int:
"""simple docstring"""
__snake_case : str = n * (n + 1) * (2 * n + 1) / 6
__snake_case : Dict = (n * (n + 1) / 2) ** 2
return int(square_of_sum - sum_of_squares )
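# Worked example (illustrative): for n = 10 the sum of the squares is 385 and the
# square of the sum is 55 ** 2 == 3_025, so the function returns 3_025 - 385 == 2_640.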
if __name__ == "__main__":
print(F'{solution() = }')
| 24
| 1
|
"""simple docstring"""
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
import torch
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
SCREAMING_SNAKE_CASE : Optional[Any] = logging.get_logger(__name__)
@dataclass
class _UpperCAmelCase ( __snake_case ):
'''simple docstring'''
lowerCamelCase__ =[
'no_inference',
'no_cuda',
'no_tpu',
'no_speed',
'no_memory',
'no_env_print',
'no_multi_process',
]
def __init__(self , **a_ ):
'''simple docstring'''
for deprecated_arg in self.deprecated_args:
if deprecated_arg in kwargs:
__snake_case : List[Any] = deprecated_arg[3:]
setattr(self , a_ , not kwargs.pop(a_ ) )
logger.warning(
f"""{deprecated_arg} is depreciated. Please use --no_{positive_arg} or"""
f""" {positive_arg}={kwargs[positive_arg]}""" )
__snake_case : Optional[Any] = kwargs.pop('''torchscript''' , self.torchscript )
__snake_case : str = kwargs.pop('''torch_xla_tpu_print_metrics''' , self.torch_xla_tpu_print_metrics )
__snake_case : Union[str, Any] = kwargs.pop('''fp16_opt_level''' , self.fpaa_opt_level )
super().__init__(**a_ )
lowerCamelCase__ =field(default=__snake_case, metadata={'help': 'Trace the models using torchscript'} )
lowerCamelCase__ =field(default=__snake_case, metadata={'help': 'Print Xla/PyTorch tpu metrics'} )
lowerCamelCase__ =field(
default='O1', metadata={
'help': (
'For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\']. '
'See details at https://nvidia.github.io/apex/amp.html'
)
}, )
@cached_property
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
requires_backends(self , ['''torch'''] )
logger.info('''PyTorch: setting up devices''' )
if not self.cuda:
__snake_case : Any = torch.device('''cpu''' )
__snake_case : List[Any] = 0
elif is_torch_tpu_available():
__snake_case : List[Any] = xm.xla_device()
__snake_case : str = 0
else:
__snake_case : Optional[int] = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
__snake_case : Union[str, Any] = torch.cuda.device_count()
return device, n_gpu
@property
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return is_torch_tpu_available() and self.tpu
@property
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
requires_backends(self , ['''torch'''] )
# TODO(PVP): currently only single GPU is supported
return torch.cuda.current_device()
@property
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
requires_backends(self , ['''torch'''] )
return self._setup_devices[0]
@property
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
requires_backends(self , ['''torch'''] )
return self._setup_devices[1]
@property
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return self.n_gpu > 0
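# Hedged usage sketch (illustrative): per the transformers benchmark utilities, these
# arguments are typically consumed as (model name and sizes are placeholders):
#     args = PyTorchBenchmarkArguments(models=['''bert-base-uncased'''] , batch_sizes=[8] , sequence_lengths=[128] )
#     benchmark = PyTorchBenchmark(args )
#     results = benchmark.run()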
| 24
|
"""simple docstring"""
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
SCREAMING_SNAKE_CASE : int = datasets.utils.logging.get_logger(__name__)
@dataclass
class _UpperCAmelCase ( datasets.BuilderConfig ):
'''simple docstring'''
lowerCamelCase__ =10000
lowerCamelCase__ =None
lowerCamelCase__ =None
class _UpperCAmelCase ( datasets.ArrowBasedBuilder ):
'''simple docstring'''
lowerCamelCase__ =ParquetConfig
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return datasets.DatasetInfo(features=self.config.features )
    def _split_generators(self , dl_manager ):
        '''simple docstring'''
        if not self.config.data_files:
            raise ValueError(f"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
        data_files = dl_manager.download_and_extract(self.config.data_files )
        if isinstance(data_files , (str, list, tuple) ):
            files = data_files
            if isinstance(files , str ):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file ) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'files': files} )]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files , str ):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file ) for file in files]
            # Infer features if they are stored in the arrow schema
            if self.info.features is None:
                for file in itertools.chain.from_iterable(files ):
                    with open(file , 'rb' ) as f:
                        self.info.features = datasets.Features.from_arrow_schema(pq.read_schema(f ) )
                    break
            splits.append(datasets.SplitGenerator(name=split_name , gen_kwargs={'files': files} ) )
        return splits
    def _cast_table(self , pa_table ):
        '''simple docstring'''
        if self.info.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table , self.info.features.arrow_schema )
        return pa_table
    def _generate_tables(self , files ):
        '''simple docstring'''
        schema = self.info.features.arrow_schema if self.info.features is not None else None
        if self.info.features is not None and self.config.columns is not None:
            if sorted(field.name for field in schema ) != sorted(self.config.columns ):
                raise ValueError(
                    f"""Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'""" )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files ) ):
            with open(file , 'rb' ) as f:
                parquet_file = pq.ParquetFile(f )
                try:
                    for batch_idx, record_batch in enumerate(
                        parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns ) ):
                        pa_table = pa.Table.from_batches([record_batch] )
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield f"""{file_idx}_{batch_idx}""", self._cast_table(pa_table )
                except ValueError as e:
                    logger.error(f"""Failed to read file '{file}' with error {type(e )}: {e}""" )
                    raise
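# Standalone sketch of the batch-iteration pattern used in _generate_tables
# above (assumes pyarrow is installed; the file path is hypothetical).
def iter_parquet_tables(path , batch_size=10000 , columns=None ):
    with open(path , 'rb' ) as f:
        parquet_file = pq.ParquetFile(f )
        for record_batch in parquet_file.iter_batches(batch_size=batch_size , columns=columns ):
            # Each RecordBatch is wrapped in a single-batch Table, mirroring the builder.
            yield pa.Table.from_batches([record_batch] )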
"""simple docstring"""
DOOMSDAY_LEAP = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
DOOMSDAY_NOT_LEAP = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
WEEK_DAY_NAMES = {
0: """Sunday""",
1: """Monday""",
2: """Tuesday""",
3: """Wednesday""",
4: """Thursday""",
5: """Friday""",
6: """Saturday""",
}
def lowercase ( year : int , month : int , day : int ) ->str:
    """simple docstring"""
    assert len(str(year ) ) > 2, "year should be in YYYY format"
    assert 1 <= month <= 12, "month should be between 1 to 12"
    assert 1 <= day <= 31, "day should be between 1 to 31"
    # Doomsday algorithm:
    century = year // 100
    century_anchor = (5 * (century % 4) + 2) % 7
    centurian = year % 100
    centurian_m = centurian % 12
    dooms_day = (
        (centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
    ) % 7
    day_anchor = (
        DOOMSDAY_NOT_LEAP[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 400) != 0)
        else DOOMSDAY_LEAP[month - 1]
    )
    week_day = (dooms_day + day - day_anchor) % 7
    return WEEK_DAY_NAMES[week_day]
if __name__ == "__main__":
import doctest
doctest.testmod()
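    # Worked example (checked by hand against the algorithm above):
    # 2020-10-24 -> doomsday 6, October anchor 3, (6 + 24 - 3) % 7 == 6 -> "Saturday".
    assert lowercase(2020 , 10 , 24 ) == "Saturday"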
"""simple docstring"""
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
    def check_results_dict_not_empty(self , results ):
        '''simple docstring'''
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result['bs'] , model_result['ss'] ):
                result = model_result['result'][batch_size][sequence_length]
                self.assertIsNotNone(result )
    def SCREAMING_SNAKE_CASE (self ):
        '''simple docstring'''
        MODEL_ID = 'sshleifer/tiny-gpt2'
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID] , training=False , inference=True , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=True , multi_process=False , )
        benchmark = TensorFlowBenchmark(benchmark_args )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )
    def SCREAMING_SNAKE_CASE (self ):
        '''simple docstring'''
        MODEL_ID = 'sgugger/tiny-distilbert-classification'
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID] , training=False , inference=True , sequence_lengths=[8] , batch_sizes=[1] , multi_process=False , only_pretrain_model=True , )
        benchmark = TensorFlowBenchmark(benchmark_args )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )
    def SCREAMING_SNAKE_CASE (self ):
        '''simple docstring'''
        MODEL_ID = 'sshleifer/tiny-gpt2'
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID] , training=False , inference=True , sequence_lengths=[8] , batch_sizes=[1] , multi_process=False , )
        benchmark = TensorFlowBenchmark(benchmark_args )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )
    def SCREAMING_SNAKE_CASE (self ):
        '''simple docstring'''
        MODEL_ID = 'sshleifer/tiny-gpt2'
        config = AutoConfig.from_pretrained(MODEL_ID )
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID] , training=False , inference=True , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=True , multi_process=False , )
        benchmark = TensorFlowBenchmark(benchmark_args , [config] )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )
    def SCREAMING_SNAKE_CASE (self ):
        '''simple docstring'''
        MODEL_ID = 'sshleifer/tiny-gpt2'
        config = AutoConfig.from_pretrained(MODEL_ID )
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID] , training=False , inference=True , sequence_lengths=[8] , batch_sizes=[1] , multi_process=False , )
        benchmark = TensorFlowBenchmark(benchmark_args , [config] )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )
    def SCREAMING_SNAKE_CASE (self ):
        '''simple docstring'''
        MODEL_ID = 'sshleifer/tiny-gpt2'
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID] , training=True , inference=False , sequence_lengths=[8] , batch_sizes=[1] , multi_process=False , )
        benchmark = TensorFlowBenchmark(benchmark_args )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result )
        self.check_results_dict_not_empty(results.memory_train_result )
    def SCREAMING_SNAKE_CASE (self ):
        '''simple docstring'''
        MODEL_ID = 'sshleifer/tiny-gpt2'
        config = AutoConfig.from_pretrained(MODEL_ID )
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID] , training=True , inference=False , sequence_lengths=[8] , batch_sizes=[1] , multi_process=False , )
        benchmark = TensorFlowBenchmark(benchmark_args , [config] )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result )
        self.check_results_dict_not_empty(results.memory_train_result )
    def SCREAMING_SNAKE_CASE (self ):
        '''simple docstring'''
        MODEL_ID = 'patrickvonplaten/t5-tiny-random'
        config = AutoConfig.from_pretrained(MODEL_ID )
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID] , training=False , inference=True , sequence_lengths=[8] , batch_sizes=[1] , multi_process=False , )
        benchmark = TensorFlowBenchmark(benchmark_args , configs=[config] )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )
    @unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices('GPU' ) ) == 0 , 'Cannot do xla on CPU.' )
    def SCREAMING_SNAKE_CASE (self ):
        '''simple docstring'''
        MODEL_ID = 'sshleifer/tiny-gpt2'
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID] , training=False , inference=True , sequence_lengths=[8] , batch_sizes=[1] , use_xla=True , multi_process=False , )
        benchmark = TensorFlowBenchmark(benchmark_args )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )
    def SCREAMING_SNAKE_CASE (self ):
        '''simple docstring'''
        MODEL_ID = 'sshleifer/tiny-gpt2'
        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = TensorFlowBenchmarkArguments(
                models=[MODEL_ID] , inference=True , save_to_csv=True , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(tmp_dir , 'inf_time.csv' ) , inference_memory_csv_file=os.path.join(tmp_dir , 'inf_mem.csv' ) , env_info_csv_file=os.path.join(tmp_dir , 'env.csv' ) , multi_process=False , )
            benchmark = TensorFlowBenchmark(benchmark_args )
            benchmark.run()
            self.assertTrue(Path(os.path.join(tmp_dir , 'inf_time.csv' ) ).exists() )
            self.assertTrue(Path(os.path.join(tmp_dir , 'inf_mem.csv' ) ).exists() )
            self.assertTrue(Path(os.path.join(tmp_dir , 'env.csv' ) ).exists() )
    def SCREAMING_SNAKE_CASE (self ):
        '''simple docstring'''
        MODEL_ID = 'sshleifer/tiny-gpt2'
        def _check_summary_is_not_empty(summary ):
            self.assertTrue(hasattr(summary , 'sequential' ) )
            self.assertTrue(hasattr(summary , 'cumulative' ) )
            self.assertTrue(hasattr(summary , 'current' ) )
            self.assertTrue(hasattr(summary , 'total' ) )
        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = TensorFlowBenchmarkArguments(
                models=[MODEL_ID] , inference=True , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(tmp_dir , 'log.txt' ) , log_print=True , trace_memory_line_by_line=True , eager_mode=True , multi_process=False , )
            benchmark = TensorFlowBenchmark(benchmark_args )
            result = benchmark.run()
            _check_summary_is_not_empty(result.inference_summary )
            self.assertTrue(Path(os.path.join(tmp_dir , 'log.txt' ) ).exists() )
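# Minimal usage sketch mirroring the tests above (assumes TF is installed and
# the tiny model can be downloaded; run manually, not under pytest):
if __name__ == "__main__":
    args = TensorFlowBenchmarkArguments(
        models=['sshleifer/tiny-gpt2'] , inference=True , sequence_lengths=[8] , batch_sizes=[1] , multi_process=False )
    print(TensorFlowBenchmark(args ).run() )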
"""simple docstring"""
import math
import time
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class _UpperCAmelCase ( Trainer ):
    '''simple docstring'''
    def __init__(self , *args , eval_examples=None , post_process_function=None , **kwargs ):
        '''simple docstring'''
        super().__init__(*args , **kwargs )
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function
    def evaluate(self , eval_dataset=None , eval_examples=None , ignore_keys=None , metric_key_prefix="eval" ):
        '''simple docstring'''
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset )
        eval_examples = self.eval_examples if eval_examples is None else eval_examples
        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                eval_dataloader , description='Evaluation' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=ignore_keys , metric_key_prefix=metric_key_prefix , )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"""{metric_key_prefix}_jit_compilation_time""" in output.metrics:
            start_time += output.metrics[f"""{metric_key_prefix}_jit_compilation_time"""]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix , start_time , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples , eval_dataset , output.predictions )
            metrics = self.compute_metrics(eval_preds )
            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys() ):
                if not key.startswith(f"""{metric_key_prefix}_""" ):
                    metrics[f"""{metric_key_prefix}_{key}"""] = metrics.pop(key )
            metrics.update(output.metrics )
        else:
            metrics = output.metrics
        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics )
        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report() )
        self.control = self.callback_handler.on_evaluate(self.args , self.state , self.control , metrics )
        return metrics
    def predict(self , predict_dataset , predict_examples , ignore_keys=None , metric_key_prefix="test" ):
        '''simple docstring'''
        predict_dataloader = self.get_test_dataloader(predict_dataset )
        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                predict_dataloader , description='Prediction' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=ignore_keys , metric_key_prefix=metric_key_prefix , )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"""{metric_key_prefix}_jit_compilation_time""" in output.metrics:
            start_time += output.metrics[f"""{metric_key_prefix}_jit_compilation_time"""]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix , start_time , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
        if self.post_process_function is None or self.compute_metrics is None:
            return output
        predictions = self.post_process_function(predict_examples , predict_dataset , output.predictions , 'predict' )
        metrics = self.compute_metrics(predictions )
        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys() ):
            if not key.startswith(f"""{metric_key_prefix}_""" ):
                metrics[f"""{metric_key_prefix}_{key}"""] = metrics.pop(key )
        metrics.update(output.metrics )
        return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=metrics )
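# Back-of-envelope sketch of the timing adjustment above (standalone helper):
# the JIT compilation time reported by the eval loop is added to start_time,
# which is equivalent to subtracting it from the measured runtime before
# computing samples/sec.
def _throughput(num_samples , wall_time , jit_time=0.0 ):
    runtime = wall_time - jit_time
    return num_samples / runtime
assert _throughput(100 , 12.0 , jit_time=2.0 ) == 10.0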
"""simple docstring"""
import logging
import os
import threading
import time
try:
    import warnings
except ImportError:
    warnings = None
try:
    import msvcrt
except ImportError:
    msvcrt = None
try:
    import fcntl
except ImportError:
    fcntl = None
# Backward compatibility
# ------------------------------------------------
try:
    TimeoutError
except NameError:
    TimeoutError = OSError
# Data
# ------------------------------------------------
__all__ = [
    "Timeout",
    "BaseFileLock",
    "WindowsFileLock",
    "UnixFileLock",
    "SoftFileLock",
    "FileLock",
]
__version__ = "3.0.12"
_logger = None
def logger():
    """simple docstring"""
    global _logger
    _logger = _logger or logging.getLogger(__name__ )
    return _logger
class Timeout( TimeoutError ):
    '''simple docstring'''
    def __init__(self , lock_file ):
        '''simple docstring'''
        self.lock_file = lock_file
        return None
    def __str__(self ):
        '''simple docstring'''
        temp = f"""The file lock '{self.lock_file}' could not be acquired."""
        return temp
class _Acquire_ReturnProxy:
    '''simple docstring'''
    def __init__(self , lock ):
        '''simple docstring'''
        self.lock = lock
        return None
    def __enter__(self ):
        '''simple docstring'''
        return self.lock
    def __exit__(self , exc_type , exc_value , traceback ):
        '''simple docstring'''
        self.lock.release()
        return None
class BaseFileLock:
    '''simple docstring'''
    def __init__(self , lock_file , timeout=-1 , max_filename_length=None ):
        '''simple docstring'''
        max_filename_length = max_filename_length if max_filename_length is not None else 255
        # Hash the filename if it's too long
        lock_file = self.hash_filename_if_too_long(lock_file , max_filename_length )
        # The path to the lock file.
        self._lock_file = lock_file
        # The file descriptor for the *_lock_file* as it is returned by the
        # os.open() function.
        # This file lock is only NOT None, if the object currently holds the
        # lock.
        self._lock_file_fd = None
        # The default timeout value.
        self.timeout = timeout
        # We use this lock primarily for the lock counter.
        self._thread_lock = threading.Lock()
        # The lock counter is used for implementing the nested locking
        # mechanism. Whenever the lock is acquired, the counter is increased and
        # the lock is only released, when this value is 0 again.
        self._lock_counter = 0
        return None
    @property
    def lock_file(self ):
        '''simple docstring'''
        return self._lock_file
    @property
    def timeout(self ):
        '''simple docstring'''
        return self._timeout
    @timeout.setter
    def timeout(self , value ):
        '''simple docstring'''
        self._timeout = float(value )
        return None
    def _acquire(self ):
        '''simple docstring'''
        raise NotImplementedError()
    def _release(self ):
        '''simple docstring'''
        raise NotImplementedError()
    @property
    def is_locked(self ):
        '''simple docstring'''
        return self._lock_file_fd is not None
    def acquire(self , timeout=None , poll_intervall=0.05 ):
        '''simple docstring'''
        if timeout is None:
            timeout = self.timeout
        # Increment the number right at the beginning.
        # We can still undo it, if something fails.
        with self._thread_lock:
            self._lock_counter += 1
        lock_id = id(self )
        lock_filename = self._lock_file
        start_time = time.time()
        try:
            while True:
                with self._thread_lock:
                    if not self.is_locked:
                        logger().debug(f"""Attempting to acquire lock {lock_id} on {lock_filename}""" )
                        self._acquire()
                if self.is_locked:
                    logger().debug(f"""Lock {lock_id} acquired on {lock_filename}""" )
                    break
                elif timeout >= 0 and time.time() - start_time > timeout:
                    logger().debug(f"""Timeout on acquiring lock {lock_id} on {lock_filename}""" )
                    raise Timeout(self._lock_file )
                else:
                    logger().debug(
                        f"""Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ...""" )
                    time.sleep(poll_intervall )
        except: # noqa
            # Something did go wrong, so decrement the counter.
            with self._thread_lock:
                self._lock_counter = max(0 , self._lock_counter - 1 )
            raise
        return _Acquire_ReturnProxy(lock=self )
    def release(self , force=False ):
        '''simple docstring'''
        with self._thread_lock:
            if self.is_locked:
                self._lock_counter -= 1
                if self._lock_counter == 0 or force:
                    lock_id = id(self )
                    lock_filename = self._lock_file
                    logger().debug(f"""Attempting to release lock {lock_id} on {lock_filename}""" )
                    self._release()
                    self._lock_counter = 0
                    logger().debug(f"""Lock {lock_id} released on {lock_filename}""" )
        return None
def __enter__(self ):
'''simple docstring'''
self.acquire()
return self
def __exit__(self , a_ , a_ , a_ ):
'''simple docstring'''
self.release()
return None
    def __del__(self ):
        '''simple docstring'''
        self.release(force=True )
        return None
    def hash_filename_if_too_long(self , path , max_length ):
        '''simple docstring'''
        filename = os.path.basename(path )
        if len(filename ) > max_length and max_length > 0:
            dirname = os.path.dirname(path )
            hashed_filename = str(hash(filename ) )
            new_filename = filename[: max_length - len(hashed_filename ) - 8] + '...' + hashed_filename + '.lock'
            return os.path.join(dirname , new_filename )
        else:
            return path
class WindowsFileLock( BaseFileLock ):
    '''simple docstring'''
    def __init__(self , lock_file , timeout=-1 , max_filename_length=None ):
        '''simple docstring'''
        from .file_utils import relative_to_absolute_path
        super().__init__(lock_file , timeout=timeout , max_filename_length=max_filename_length )
        self._lock_file = '\\\\?\\' + relative_to_absolute_path(self.lock_file )
    def _acquire(self ):
        '''simple docstring'''
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        try:
            fd = os.open(self._lock_file , open_mode )
        except OSError:
            pass
        else:
            try:
                msvcrt.locking(fd , msvcrt.LK_NBLCK , 1 )
            except OSError:
                os.close(fd )
            else:
                self._lock_file_fd = fd
        return None
    def _release(self ):
        '''simple docstring'''
        fd = self._lock_file_fd
        self._lock_file_fd = None
        msvcrt.locking(fd , msvcrt.LK_UNLCK , 1 )
        os.close(fd )
        try:
            os.remove(self._lock_file )
        # Probably another instance of the application
        # that acquired the file lock.
        except OSError:
            pass
        return None
class UnixFileLock( BaseFileLock ):
    '''simple docstring'''
    def __init__(self , lock_file , timeout=-1 , max_filename_length=None ):
        '''simple docstring'''
        max_filename_length = os.statvfs(os.path.dirname(lock_file ) ).f_namemax
        super().__init__(lock_file , timeout=timeout , max_filename_length=max_filename_length )
    def _acquire(self ):
        '''simple docstring'''
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        fd = os.open(self._lock_file , open_mode )
        try:
            fcntl.flock(fd , fcntl.LOCK_EX | fcntl.LOCK_NB )
        except OSError:
            os.close(fd )
        else:
            self._lock_file_fd = fd
        return None
    def _release(self ):
        '''simple docstring'''
        fd = self._lock_file_fd
        self._lock_file_fd = None
        fcntl.flock(fd , fcntl.LOCK_UN )
        os.close(fd )
        return None
class SoftFileLock( BaseFileLock ):
    '''simple docstring'''
    def _acquire(self ):
        '''simple docstring'''
        open_mode = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
        try:
            fd = os.open(self._lock_file , open_mode )
        except OSError:
            pass
        else:
            self._lock_file_fd = fd
        return None
    def _release(self ):
        '''simple docstring'''
        os.close(self._lock_file_fd )
        self._lock_file_fd = None
        try:
            os.remove(self._lock_file )
        # The file is already deleted and that's what we want.
        except OSError:
            pass
        return None
FileLock = None
if msvcrt:
    FileLock = WindowsFileLock
elif fcntl:
    FileLock = UnixFileLock
else:
    FileLock = SoftFileLock
    if warnings is not None:
        warnings.warn("only soft file lock is available")
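# Usage sketch (hypothetical lock path): the platform-appropriate FileLock
# class selected above supports both the context-manager and acquire()/release()
# protocols, with re-entrant counting.
if __name__ == "__main__":
    with FileLock("example.txt.lock" , timeout=5 ):
        pass  # critical section; the lock is released on exit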
"""simple docstring"""
import json
import os
import tempfile
from unittest.mock import patch
import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import DistributedType, infer_auto_device_map, init_empty_weights
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState, PartialState
from accelerate.test_utils import require_bnb, require_multi_gpu, slow
from accelerate.test_utils.testing import AccelerateTestCase, require_cuda
from accelerate.utils import patch_environment
def create_components( ):
    """simple docstring"""
    model = torch.nn.Linear(2 , 4 )
    optimizer = torch.optim.AdamW(model.parameters() , lr=1.0 )
    scheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer , max_lr=0.01 , steps_per_epoch=2 , epochs=1 )
    train_dl = DataLoader(TensorDataset(torch.tensor([1, 2, 3] ) ) )
    valid_dl = DataLoader(TensorDataset(torch.tensor([4, 5, 6] ) ) )
    return model, optimizer, scheduler, train_dl, valid_dl
def get_signature( model ):
    """simple docstring"""
    return (model.weight.abs().sum() + model.bias.abs().sum()).item()
def load_random_weights( model ):
    """simple docstring"""
    state = torch.nn.Linear(*tuple(model.weight.T.shape ) ).state_dict()
    model.load_state_dict(state )
class _UpperCAmelCase ( AccelerateTestCase ):
    '''simple docstring'''
    @require_cuda
    def SCREAMING_SNAKE_CASE (self ):
        '''simple docstring'''
        accelerator = Accelerator()
        assert PartialState._shared_state["_cpu"] is False
        assert PartialState._shared_state["device"].type == "cuda"
        with self.assertRaises(ValueError ):
            _ = Accelerator(cpu=True )
    def SCREAMING_SNAKE_CASE (self ):
        '''simple docstring'''
        accelerator = Accelerator()
        state = GradientState()
        assert state.num_steps == 1
        accelerator.gradient_accumulation_steps = 4
        assert state.num_steps == 4
        assert state.sync_gradients is True
        accelerator.sync_gradients = False
        assert state.sync_gradients is False
        GradientState._reset_state()
    def SCREAMING_SNAKE_CASE (self ):
        '''simple docstring'''
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        (
            prepared_model,
            prepared_optimizer,
            prepared_scheduler,
            prepared_train_dl,
            prepared_valid_dl,
        ) = accelerator.prepare(model , optimizer , scheduler , train_dl , valid_dl )
        self.assertTrue(prepared_model in accelerator._models )
        self.assertTrue(prepared_optimizer in accelerator._optimizers )
        self.assertTrue(prepared_scheduler in accelerator._schedulers )
        self.assertTrue(prepared_train_dl in accelerator._dataloaders )
        self.assertTrue(prepared_valid_dl in accelerator._dataloaders )
    def SCREAMING_SNAKE_CASE (self ):
        '''simple docstring'''
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        accelerator.prepare(model , optimizer , scheduler , train_dl , valid_dl )
        accelerator.free_memory()
        self.assertTrue(len(accelerator._models ) == 0 )
        self.assertTrue(len(accelerator._optimizers ) == 0 )
        self.assertTrue(len(accelerator._schedulers ) == 0 )
        self.assertTrue(len(accelerator._dataloaders ) == 0 )
    def SCREAMING_SNAKE_CASE (self ):
        '''simple docstring'''
        PartialState._reset_state()
        # Mock torch.cuda.set_device to avoid an exception as the device doesn't exist
        def noop(*args , **kwargs ):
            pass
        with patch('torch.cuda.set_device' , noop ), patch_environment(ACCELERATE_TORCH_DEVICE='cuda:64' ):
            accelerator = Accelerator()
            self.assertEqual(str(accelerator.state.device ) , 'cuda:64' )
    def SCREAMING_SNAKE_CASE (self ):
        '''simple docstring'''
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        accelerator.prepare(model , optimizer , scheduler , train_dl , valid_dl )
        model_signature = get_signature(model )
        with tempfile.TemporaryDirectory() as tmpdirname:
            accelerator.save_state(tmpdirname )
            # make sure random weights don't match
            load_random_weights(model )
            self.assertTrue(abs(model_signature - get_signature(model ) ) > 1E-3 )
            # make sure loaded weights match
            accelerator.load_state(tmpdirname )
            self.assertTrue(abs(model_signature - get_signature(model ) ) < 1E-3 )
    def SCREAMING_SNAKE_CASE (self ):
        '''simple docstring'''
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        accelerator.prepare(model , optimizer , scheduler , train_dl , valid_dl )
        model_signature = get_signature(model )
        # saving hook
        def save_config(models , weights , output_dir ):
            config = {'class_name': models[0].__class__.__name__}
            with open(os.path.join(output_dir , 'data.json' ) , 'w' ) as f:
                json.dump(config , f )
        # loading hook
        def load_config(models , input_dir ):
            with open(os.path.join(input_dir , 'data.json' ) , 'r' ) as f:
                config = json.load(f )
            models[0].class_name = config['class_name']
        save_hook = accelerator.register_save_state_pre_hook(save_config )
        load_hook = accelerator.register_load_state_pre_hook(load_config )
        with tempfile.TemporaryDirectory() as tmpdirname:
            accelerator.save_state(tmpdirname )
            # make sure random weights don't match with hooks
            load_random_weights(model )
            self.assertTrue(abs(model_signature - get_signature(model ) ) > 1E-3 )
            # random class name to verify correct one is loaded
            model.class_name = 'random'
            # make sure loaded weights match with hooks
            accelerator.load_state(tmpdirname )
            self.assertTrue(abs(model_signature - get_signature(model ) ) < 1E-3 )
            # model.class_name is loaded from config
            self.assertTrue(model.class_name == model.__class__.__name__ )
        # remove hooks
        save_hook.remove()
        load_hook.remove()
        with tempfile.TemporaryDirectory() as tmpdirname:
            accelerator.save_state(tmpdirname )
            # make sure random weights don't match with hooks removed
            load_random_weights(model )
            self.assertTrue(abs(model_signature - get_signature(model ) ) > 1E-3 )
            # random class name to verify correct one is loaded
            model.class_name = 'random'
            # make sure loaded weights match with hooks removed
            accelerator.load_state(tmpdirname )
            self.assertTrue(abs(model_signature - get_signature(model ) ) < 1E-3 )
            # model.class_name is NOT loaded from config
            self.assertTrue(model.class_name != model.__class__.__name__ )
    def SCREAMING_SNAKE_CASE (self ):
        '''simple docstring'''
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        dummy_obj = None
        # This should work
        model, optimizer, scheduler, train_dl, valid_dl, dummy_obj = accelerator.prepare(
            model , optimizer , scheduler , train_dl , valid_dl , dummy_obj )
        self.assertTrue(dummy_obj is None )
    def SCREAMING_SNAKE_CASE (self ):
        '''simple docstring'''
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        dummy_obj = [1, 2, 3]
        # This should work
        model, optimizer, scheduler, train_dl, valid_dl, dummy_obj = accelerator.prepare(
            model , optimizer , scheduler , train_dl , valid_dl , dummy_obj )
        self.assertEqual(
            getattr(dummy_obj , '_is_accelerate_prepared' , False ) , False , 'Dummy object should have `_is_accelerate_prepared` set to `True`' , )
        self.assertEqual(
            getattr(model , '_is_accelerate_prepared' , False ) , True , 'Model is missing `_is_accelerator_prepared` or is set to `False`' , )
        self.assertEqual(
            getattr(optimizer , '_is_accelerate_prepared' , False ) , True , 'Optimizer is missing `_is_accelerator_prepared` or is set to `False`' , )
        self.assertEqual(
            getattr(scheduler , '_is_accelerate_prepared' , False ) , True , 'Scheduler is missing `_is_accelerator_prepared` or is set to `False`' , )
        self.assertEqual(
            getattr(train_dl , '_is_accelerate_prepared' , False ) , True , 'Train Dataloader is missing `_is_accelerator_prepared` or is set to `False`' , )
        self.assertEqual(
            getattr(valid_dl , '_is_accelerate_prepared' , False ) , True , 'Valid Dataloader is missing `_is_accelerator_prepared` or is set to `False`' , )
    @slow
    @require_bnb
    def SCREAMING_SNAKE_CASE (self ):
        '''simple docstring'''
        from transformers import AutoModelForCausalLM
        model = AutoModelForCausalLM.from_pretrained(
            'EleutherAI/gpt-neo-125m' , load_in_8bit=True , device_map={'': 0} , )
        accelerator = Accelerator()
        # This should work
        model = accelerator.prepare(model )
    @slow
    @require_bnb
    def SCREAMING_SNAKE_CASE (self ):
        '''simple docstring'''
        from transformers import AutoModelForCausalLM
        accelerator = Accelerator()
        with init_empty_weights():
            model = AutoModelForCausalLM.from_pretrained(
                'EleutherAI/gpt-neo-125m' , )
        model.tie_weights()
        device_map = infer_auto_device_map(model )
        device_map['lm_head'] = 'cpu'
        model = AutoModelForCausalLM.from_pretrained(
            'EleutherAI/gpt-neo-125m' , device_map=device_map , load_in_8bit=True , llm_int8_enable_fp32_cpu_offload=True )
        # This should not work and get value error
        with self.assertRaises(ValueError ):
            model = accelerator.prepare(model )
    @slow
    @require_bnb
    @require_multi_gpu
    def SCREAMING_SNAKE_CASE (self ):
        '''simple docstring'''
        from transformers import AutoModelForCausalLM
        PartialState._shared_state = {'distributed_type': DistributedType.MULTI_GPU}
        with init_empty_weights():
            model = AutoModelForCausalLM.from_pretrained(
                'EleutherAI/gpt-neo-125m' , )
        model.tie_weights()
        device_map = infer_auto_device_map(model )
        device_map['lm_head'] = 1
        model = AutoModelForCausalLM.from_pretrained(
            'EleutherAI/gpt-neo-125m' , load_in_8bit=True , device_map=device_map , )
        accelerator = Accelerator()
        # This should not work and get value error
        with self.assertRaises(ValueError ):
            _ = accelerator.prepare(model )
        PartialState._reset_state()
    @slow
    @require_bnb
    @require_multi_gpu
    def SCREAMING_SNAKE_CASE (self ):
        '''simple docstring'''
        from transformers import AutoModelForCausalLM
        with init_empty_weights():
            model = AutoModelForCausalLM.from_pretrained(
                'EleutherAI/gpt-neo-125m' , )
        device_map = infer_auto_device_map(model )
        device_map['lm_head'] = 1
        model = AutoModelForCausalLM.from_pretrained(
            'EleutherAI/gpt-neo-125m' , load_in_8bit=True , device_map=device_map , )
        accelerator = Accelerator()
        # This should work
        model = accelerator.prepare(model )
    @require_cuda
    def SCREAMING_SNAKE_CASE (self ):
        '''simple docstring'''
        model = torch.nn.Linear(10 , 10 )
        sgd = torch.optim.SGD(model.parameters() , lr=0.01 )
        accelerator = Accelerator(cpu=True )
        _ = accelerator.prepare(sgd )
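# Standalone sketch of the "signature" trick used by get_signature above:
# summing absolute parameter values gives a cheap scalar fingerprint, so a
# save -> perturb -> load round trip can be verified with two comparisons.
if __name__ == "__main__":
    net = torch.nn.Linear(2 , 4 )
    before = get_signature(net )
    load_random_weights(net )  # perturb the weights
    print(abs(before - get_signature(net ) ) )  # nonzero after perturbation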
"""simple docstring"""
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class LiltModelTester:
    '''simple docstring'''
    def __init__(self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=24 , num_hidden_layers=2 , num_attention_heads=6 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , scope=None , range_bbox=1000 , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.range_bbox = range_bbox
    def prepare_config_and_inputs(self ):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        bbox = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox )
        # Ensure that bbox is legal
        for i in range(bbox.shape[0] ):
            for j in range(bbox.shape[1] ):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t
        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
        config = self.get_config()
        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    def get_config(self ):
        '''simple docstring'''
        return LiltConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
    def create_and_check_model(self , config , input_ids , bbox , token_type_ids , input_mask , sequence_labels , token_labels , ):
        '''simple docstring'''
        model = LiltModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , bbox=bbox , attention_mask=input_mask , token_type_ids=token_type_ids )
        result = model(input_ids , bbox=bbox , token_type_ids=token_type_ids )
        result = model(input_ids , bbox=bbox )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
    def create_and_check_for_token_classification(self , config , input_ids , bbox , token_type_ids , input_mask , sequence_labels , token_labels , ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = LiltForTokenClassification(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , bbox=bbox , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_for_question_answering(self , config , input_ids , bbox , token_type_ids , input_mask , sequence_labels , token_labels , ):
        '''simple docstring'''
        model = LiltForQuestionAnswering(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , bbox=bbox , attention_mask=input_mask , token_type_ids=token_type_ids , start_positions=sequence_labels , end_positions=sequence_labels , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common(self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            'input_ids': input_ids,
            'bbox': bbox,
            'token_type_ids': token_type_ids,
            'attention_mask': input_mask,
        }
        return config, inputs_dict
@require_torch
class _UpperCAmelCase ( ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase ):
    '''simple docstring'''
    all_model_classes = (
        (
            LiltModel,
            LiltForSequenceClassification,
            LiltForTokenClassification,
            LiltForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            'feature-extraction': LiltModel,
            'question-answering': LiltForQuestionAnswering,
            'text-classification': LiltForSequenceClassification,
            'token-classification': LiltForTokenClassification,
            'zero-shot': LiltForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    def is_pipeline_test_to_skip(self , pipeline_test_case_name , config_class , model_architecture , tokenizer_name , processor_name ):
        '''simple docstring'''
        return True
    def setUp(self ):
        '''simple docstring'''
        self.model_tester = LiltModelTester(self )
        self.config_tester = ConfigTester(self , config_class=LiltConfig , hidden_size=37 )
    def SCREAMING_SNAKE_CASE (self ):
        '''simple docstring'''
        self.config_tester.run_common_tests()
    def SCREAMING_SNAKE_CASE (self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def SCREAMING_SNAKE_CASE (self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs )
    def SCREAMING_SNAKE_CASE (self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )
    def SCREAMING_SNAKE_CASE (self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs )
    @slow
    def SCREAMING_SNAKE_CASE (self ):
        '''simple docstring'''
        for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LiltModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_torch
@slow
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
    def SCREAMING_SNAKE_CASE (self ):
        '''simple docstring'''
        model = LiltModel.from_pretrained('SCUT-DLVCLab/lilt-roberta-en-base' ).to(torch_device )
        input_ids = torch.tensor([[1, 2]] , device=torch_device )
        bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(input_ids=input_ids , bbox=bbox )
        expected_shape = torch.Size([1, 2, 768] )
        expected_slice = torch.tensor(
            [[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]] , device=torch_device , )
        self.assertTrue(outputs.last_hidden_state.shape , expected_shape )
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , expected_slice , atol=1E-3 ) )
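# Vectorized equivalent of the bbox-normalization loop in
# prepare_config_and_inputs above (an illustrative sketch; not used by the
# tests): swap coordinates so that x0 <= x1 and y0 <= y1 for every box at once.
def normalize_bbox(bbox ):
    x0 = torch.minimum(bbox[..., 0] , bbox[..., 2] )
    x1 = torch.maximum(bbox[..., 0] , bbox[..., 2] )
    y0 = torch.minimum(bbox[..., 1] , bbox[..., 3] )
    y1 = torch.maximum(bbox[..., 1] , bbox[..., 3] )
    return torch.stack([x0, y0, x1, y1] , dim=-1 )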
"""simple docstring"""
import json
import os
import tempfile
from transformers.testing_utils import check_json_file_has_correct_format
class _UpperCAmelCase :
    '''simple docstring'''
    feature_extraction_class = None
    def SCREAMING_SNAKE_CASE (self ):
        '''simple docstring'''
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict )
        obj = json.loads(feat_extract.to_json_string() )
        for key, value in self.feat_extract_dict.items():
            self.assertEqual(obj[key] , value )
    def SCREAMING_SNAKE_CASE (self ):
        '''simple docstring'''
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname , 'feat_extract.json' )
            feat_extract_first.to_json_file(json_file_path )
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path )
        self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
    def SCREAMING_SNAKE_CASE (self ):
        '''simple docstring'''
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname )[0]
            check_json_file_has_correct_format(saved_file )
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname )
        self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
    def SCREAMING_SNAKE_CASE (self ):
        '''simple docstring'''
        feat_extract = self.feature_extraction_class()
        self.assertIsNotNone(feat_extract )
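# Tiny round-trip illustration of the serialization contract exercised above
# (hypothetical standalone class, not a transformers API): json.loads must
# invert to_json_string.
class _TinyFeatureExtractor:
    def __init__(self , **kwargs ):
        self.__dict__.update(kwargs )
    def to_json_string(self ):
        return json.dumps(self.__dict__ , indent=2 )
assert json.loads(_TinyFeatureExtractor(sampling_rate=16000 ).to_json_string() )["sampling_rate"] == 16000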
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class DistilBertModelTester:
    '''simple docstring'''
    def __init__(self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=False , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self ):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self ):
        '''simple docstring'''
        return DistilBertConfig(
            vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
    def create_and_check_distilbert_model(self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        model = DistilBertModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , input_mask )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_distilbert_for_masked_lm(self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        model = DistilBertForMaskedLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_distilbert_for_question_answering(self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        model = DistilBertForQuestionAnswering(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , start_positions=sequence_labels , end_positions=sequence_labels )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def create_and_check_distilbert_for_sequence_classification(self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = DistilBertForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , labels=sequence_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_distilbert_for_token_classification(self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = DistilBertForTokenClassification(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_distilbert_for_multiple_choice(self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        config.num_choices = self.num_choices
        model = DistilBertForMultipleChoice(config=config )
        model.to(torch_device )
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        result = model(
            multiple_choice_inputs_ids , attention_mask=multiple_choice_input_mask , labels=choice_labels , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common(self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class _UpperCAmelCase ( ModelTesterMixin, PipelineTesterMixin, unittest.TestCase ):
    '''simple docstring'''
    all_model_classes = (
        (
            DistilBertModel,
            DistilBertForMaskedLM,
            DistilBertForMultipleChoice,
            DistilBertForQuestionAnswering,
            DistilBertForSequenceClassification,
            DistilBertForTokenClassification,
        )
        if is_torch_available()
        else None
    )
    pipeline_model_mapping = (
        {
            'feature-extraction': DistilBertModel,
            'fill-mask': DistilBertForMaskedLM,
            'question-answering': DistilBertForQuestionAnswering,
            'text-classification': DistilBertForSequenceClassification,
            'token-classification': DistilBertForTokenClassification,
            'zero-shot': DistilBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_pruning = True
    test_resize_embeddings = True
    test_resize_position_embeddings = True
    def setUp(self ):
        '''simple docstring'''
        self.model_tester = DistilBertModelTester(self )
        self.config_tester = ConfigTester(self , config_class=DistilBertConfig , dim=37 )
    def SCREAMING_SNAKE_CASE (self ):
        '''simple docstring'''
        self.config_tester.run_common_tests()
    def SCREAMING_SNAKE_CASE (self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs )
    def SCREAMING_SNAKE_CASE (self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs )
    def SCREAMING_SNAKE_CASE (self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs )
    def SCREAMING_SNAKE_CASE (self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs )
    def SCREAMING_SNAKE_CASE (self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs )
    def SCREAMING_SNAKE_CASE (self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs )
    @slow
    def SCREAMING_SNAKE_CASE (self ):
        '''simple docstring'''
        for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DistilBertModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
    @slow
    @require_torch_gpu
    def SCREAMING_SNAKE_CASE (self ):
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # BertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == DistilBertForMultipleChoice:
                return
            config.torchscript = True
            model = model_class(config=config )
            inputs_dict = self._prepare_for_class(inputs_dict , model_class )
            traced_model = torch.jit.trace(
                model , (inputs_dict['input_ids'].to('cpu' ), inputs_dict['attention_mask'].to('cpu' )) )
            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model , os.path.join(tmp , 'traced_model.pt' ) )
                loaded = torch.jit.load(os.path.join(tmp , 'traced_model.pt' ) , map_location=torch_device )
                loaded(inputs_dict['input_ids'].to(torch_device ) , inputs_dict['attention_mask'].to(torch_device ) )
@require_torch
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
    @slow
    def SCREAMING_SNAKE_CASE (self ):
        '''simple docstring'''
        model = DistilBertModel.from_pretrained('distilbert-base-uncased' )
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
        with torch.no_grad():
            output = model(input_ids , attention_mask=attention_mask )[0]
        expected_shape = torch.Size((1, 11, 768) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]] )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , expected_slice , atol=1E-4 ) )
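# Standalone sketch of the trace -> save -> load round trip exercised by the
# GPU test above (assumes torch is installed; paths are temporary):
if __name__ == "__main__":
    net = torch.nn.Linear(4 , 2 )
    traced = torch.jit.trace(net , torch.randn(1 , 4 ) )
    with tempfile.TemporaryDirectory() as tmp:
        torch.jit.save(traced , os.path.join(tmp , "traced.pt" ) )
        reloaded = torch.jit.load(os.path.join(tmp , "traced.pt" ) )
        print(reloaded(torch.randn(1 , 4 ) ) )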
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_blip_2""": [
"""BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""Blip2Config""",
"""Blip2QFormerConfig""",
"""Blip2VisionConfig""",
],
"""processing_blip_2""": ["""Blip2Processor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blip_2"] = [
"""BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Blip2Model""",
"""Blip2QFormerModel""",
"""Blip2PreTrainedModel""",
"""Blip2ForConditionalGeneration""",
"""Blip2VisionModel""",
]
if TYPE_CHECKING:
    from .configuration_blip_2 import (
        BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Blip2Config,
        Blip2QFormerConfig,
        Blip2VisionConfig,
    )
    from .processing_blip_2 import Blip2Processor
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blip_2 import (
            BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Blip2ForConditionalGeneration,
            Blip2Model,
            Blip2PreTrainedModel,
            Blip2QFormerModel,
            Blip2VisionModel,
        )
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
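# For illustration only -- a minimal PEP 562 sketch of the lazy-import pattern
# that _LazyModule implements (hypothetical mapping; intentionally left
# commented out so it does not interfere with the _LazyModule installed above):
#
#     import importlib
#     def __getattr__(name):
#         submodule = {"Blip2Processor": ".processing_blip_2"}.get(name)
#         if submodule is None:
#             raise AttributeError(name)
#         return getattr(importlib.import_module(submodule, __name__), name)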
"""simple docstring"""
import argparse
import tensorflow as tf
import torch
from transformers import BertConfig, BertForMaskedLM
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertPooler,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
logging.set_verbosity_info()
def lowercase ( tf_checkpoint_path : str , config_path : str , pytorch_dump_path : str ) ->List[Any]:
    """simple docstring"""
    def get_masked_lm_array(name : str ):
        full_name = f"""masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE"""
        array = tf.train.load_variable(tf_checkpoint_path , full_name )
        if "kernel" in name:
            array = array.transpose()
        return torch.from_numpy(array )
    def get_encoder_array(name : str ):
        full_name = f"""encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE"""
        array = tf.train.load_variable(tf_checkpoint_path , full_name )
        if "kernel" in name:
            array = array.transpose()
        return torch.from_numpy(array )
    def get_encoder_layer_array(layer_index : int , name : str ):
        full_name = f"""encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE"""
        array = tf.train.load_variable(tf_checkpoint_path , full_name )
        if "kernel" in name:
            array = array.transpose()
        return torch.from_numpy(array )
    def get_encoder_attention_layer_array(layer_index : int , name : str , original_shape ):
        full_name = f"""encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE"""
        array = tf.train.load_variable(tf_checkpoint_path , full_name )
        array = array.reshape(original_shape )
        if "kernel" in name:
            array = array.transpose()
        return torch.from_numpy(array )
    print(f"""Loading model based on config from {config_path}...""" )
    config = BertConfig.from_json_file(config_path )
    model = BertForMaskedLM(config )
# Layers
for layer_index in range(0 , config.num_hidden_layers ):
__snake_case : BertLayer = model.bert.encoder.layer[layer_index]
# Self-attention
__snake_case : BertSelfAttention = layer.attention.self
__snake_case : int = get_encoder_attention_layer_array(
_snake_case , '''_query_dense/kernel''' , self_attn.query.weight.data.shape )
__snake_case : str = get_encoder_attention_layer_array(
_snake_case , '''_query_dense/bias''' , self_attn.query.bias.data.shape )
__snake_case : str = get_encoder_attention_layer_array(
_snake_case , '''_key_dense/kernel''' , self_attn.key.weight.data.shape )
__snake_case : List[Any] = get_encoder_attention_layer_array(
_snake_case , '''_key_dense/bias''' , self_attn.key.bias.data.shape )
__snake_case : Tuple = get_encoder_attention_layer_array(
_snake_case , '''_value_dense/kernel''' , self_attn.value.weight.data.shape )
__snake_case : Union[str, Any] = get_encoder_attention_layer_array(
_snake_case , '''_value_dense/bias''' , self_attn.value.bias.data.shape )
# Self-attention Output
__snake_case : BertSelfOutput = layer.attention.output
__snake_case : Dict = get_encoder_attention_layer_array(
_snake_case , '''_output_dense/kernel''' , self_output.dense.weight.data.shape )
__snake_case : Tuple = get_encoder_attention_layer_array(
_snake_case , '''_output_dense/bias''' , self_output.dense.bias.data.shape )
__snake_case : str = get_encoder_layer_array(_snake_case , '''_attention_layer_norm/gamma''' )
__snake_case : Any = get_encoder_layer_array(_snake_case , '''_attention_layer_norm/beta''' )
# Intermediate
__snake_case : BertIntermediate = layer.intermediate
__snake_case : int = get_encoder_layer_array(_snake_case , '''_intermediate_dense/kernel''' )
__snake_case : int = get_encoder_layer_array(_snake_case , '''_intermediate_dense/bias''' )
# Output
__snake_case : BertOutput = layer.output
__snake_case : List[str] = get_encoder_layer_array(_snake_case , '''_output_dense/kernel''' )
__snake_case : Dict = get_encoder_layer_array(_snake_case , '''_output_dense/bias''' )
__snake_case : List[str] = get_encoder_layer_array(_snake_case , '''_output_layer_norm/gamma''' )
__snake_case : Union[str, Any] = get_encoder_layer_array(_snake_case , '''_output_layer_norm/beta''' )
# Embeddings
__snake_case : Optional[int] = get_encoder_array('''_position_embedding_layer/embeddings''' )
__snake_case : str = get_encoder_array('''_type_embedding_layer/embeddings''' )
__snake_case : int = get_encoder_array('''_embedding_norm_layer/gamma''' )
__snake_case : Tuple = get_encoder_array('''_embedding_norm_layer/beta''' )
# LM Head
__snake_case : Optional[Any] = model.cls.predictions.transform
__snake_case : Dict = get_masked_lm_array('''dense/kernel''' )
__snake_case : Union[str, Any] = get_masked_lm_array('''dense/bias''' )
__snake_case : str = get_masked_lm_array('''layer_norm/gamma''' )
__snake_case : Tuple = get_masked_lm_array('''layer_norm/beta''' )
__snake_case : Tuple = get_masked_lm_array('''embedding_table''' )
# Pooling
__snake_case : Optional[Any] = BertPooler(config=_snake_case )
__snake_case : BertPooler = get_encoder_array('''_pooler_layer/kernel''' )
__snake_case : BertPooler = get_encoder_array('''_pooler_layer/bias''' )
# Export final model
model.save_pretrained(_snake_case )
# Integration test - should load without any errors ;)
__snake_case : Dict = BertForMaskedLM.from_pretrained(_snake_case )
print(new_model.eval() )
    print('''Model conversion was done successfully!''' )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE : int = argparse.ArgumentParser()
parser.add_argument(
"""--tf_checkpoint_path""", type=str, required=True, help="""Path to the TensorFlow Token Dropping checkpoint path."""
)
parser.add_argument(
"""--bert_config_file""",
type=str,
required=True,
help="""The config json file corresponding to the BERT model. This specifies the model architecture.""",
)
parser.add_argument(
"""--pytorch_dump_path""",
type=str,
required=True,
help="""Path to the output PyTorch model.""",
)
SCREAMING_SNAKE_CASE : Optional[int] = parser.parse_args()
convert_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
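# Hedged invocation sketch (the script name and paths below are placeholders,
# not taken from the repository):
#
#   python convert_token_dropping_checkpoint.py \
#       --tf_checkpoint_path /path/to/tf_checkpoint \
#       --bert_config_file /path/to/bert_config.json \
#       --pytorch_dump_path /path/to/pytorch_model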
| 24
| 1
|
"""simple docstring"""
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class _UpperCAmelCase :
'''simple docstring'''
def __init__(self , a_ , a_=12 , a_=7 , a_=True , a_=True , a_=True , a_=99 , a_=32 , a_=32 , a_=2 , a_=4 , a_=37 , a_=0.1 , a_=0.1 , a_=5_12 , a_=0.02 , a_=0 , a_=None , ):
'''simple docstring'''
__snake_case : Dict = parent
__snake_case : str = batch_size
__snake_case : Optional[Any] = seq_length
__snake_case : Optional[Any] = is_training
__snake_case : int = use_input_mask
__snake_case : int = use_labels
__snake_case : Optional[int] = vocab_size
__snake_case : Union[str, Any] = hidden_size
__snake_case : Optional[int] = projection_dim
__snake_case : List[Any] = num_hidden_layers
__snake_case : Optional[int] = num_attention_heads
__snake_case : Any = intermediate_size
__snake_case : Optional[Any] = dropout
__snake_case : Optional[Any] = attention_dropout
__snake_case : Optional[Any] = max_position_embeddings
__snake_case : List[str] = initializer_range
__snake_case : Union[str, Any] = scope
__snake_case : int = bos_token_id
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__snake_case : Optional[Any] = None
if self.use_input_mask:
__snake_case : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
if input_mask is not None:
__snake_case : Union[str, Any] = input_mask.numpy()
__snake_case , __snake_case : Optional[int] = input_mask.shape
__snake_case : Optional[int] = np.random.randint(1 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(a_ ):
__snake_case : Any = 1
__snake_case : str = 0
__snake_case : List[str] = self.get_config()
return config, input_ids, tf.convert_to_tensor(a_ )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return BlipTextConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , projection_dim=self.projection_dim , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , dropout=self.dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , bos_token_id=self.bos_token_id , )
def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ ):
'''simple docstring'''
__snake_case : List[Any] = TFBlipTextModel(config=a_ )
__snake_case : int = model(a_ , attention_mask=a_ , training=a_ )
__snake_case : Optional[int] = model(a_ , training=a_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[int] = self.prepare_config_and_inputs()
__snake_case , __snake_case , __snake_case : List[Any] = config_and_inputs
__snake_case : List[Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class _UpperCAmelCase ( __snake_case, unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ =(TFBlipTextModel,) if is_tf_available() else ()
lowerCamelCase__ =False
lowerCamelCase__ =False
lowerCamelCase__ =False
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : str = BlipTextModelTester(self )
__snake_case : Dict = ConfigTester(self , config_class=a_ , hidden_size=37 )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a_ )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
pass
@unittest.skip(reason='''Blip does not use inputs_embeds''' )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
pass
@unittest.skip(reason='''BlipTextModel has no base class and is not available in MODEL_MAPPING''' )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
pass
@unittest.skip(reason='''BlipTextModel has no base class and is not available in MODEL_MAPPING''' )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
pass
@slow
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case : Tuple = TFBlipTextModel.from_pretrained(a_ )
self.assertIsNotNone(a_ )
def SCREAMING_SNAKE_CASE (self , a_=True ):
'''simple docstring'''
super().test_pt_tf_model_equivalence(allow_missing_keys=a_ )
| 24
|
"""simple docstring"""
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
    import sqlite3
import sqlalchemy
class _UpperCAmelCase ( __snake_case ):
'''simple docstring'''
def __init__(self , a_ , a_ , a_ = None , a_ = None , a_ = False , **a_ , ):
'''simple docstring'''
super().__init__(features=a_ , cache_dir=a_ , keep_in_memory=a_ , **a_ )
__snake_case : Union[str, Any] = Sql(
cache_dir=a_ , features=a_ , sql=a_ , con=a_ , **a_ , )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Union[str, Any] = None
__snake_case : Dict = None
__snake_case : Dict = None
__snake_case : List[str] = None
self.builder.download_and_prepare(
download_config=a_ , download_mode=a_ , verification_mode=a_ , base_path=a_ , )
# Build dataset for splits
__snake_case : Any = self.builder.as_dataset(
split='''train''' , verification_mode=a_ , in_memory=self.keep_in_memory )
return dataset
class _UpperCAmelCase :
'''simple docstring'''
def __init__(self , a_ , a_ , a_ , a_ = None , a_ = None , **a_ , ):
'''simple docstring'''
if num_proc is not None and num_proc <= 0:
raise ValueError(f"""num_proc {num_proc} must be an integer > 0.""" )
__snake_case : List[str] = dataset
__snake_case : Tuple = name
__snake_case : Optional[int] = con
__snake_case : int = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
__snake_case : Dict = num_proc
__snake_case : Dict = to_sql_kwargs
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[int] = self.to_sql_kwargs.pop('''sql''' , a_ )
__snake_case : Union[str, Any] = self.to_sql_kwargs.pop('''con''' , a_ )
__snake_case : Any = self.to_sql_kwargs.pop('''index''' , a_ )
__snake_case : Optional[Any] = self._write(index=a_ , **self.to_sql_kwargs )
return written
def SCREAMING_SNAKE_CASE (self , a_ ):
'''simple docstring'''
__snake_case , __snake_case , __snake_case : Optional[Any] = args
__snake_case : List[Any] = {**to_sql_kwargs, '''if_exists''': '''append'''} if offset > 0 else to_sql_kwargs
__snake_case : Dict = query_table(
table=self.dataset.data , key=slice(a_ , offset + self.batch_size ) , indices=self.dataset._indices , )
__snake_case : Tuple = batch.to_pandas()
__snake_case : str = df.to_sql(self.name , self.con , index=a_ , **a_ )
return num_rows or len(a_ )
def SCREAMING_SNAKE_CASE (self , a_ , **a_ ):
'''simple docstring'''
__snake_case : int = 0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0 , len(self.dataset ) , self.batch_size ) , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating SQL from Arrow format''' , ):
written += self._batch_sql((offset, index, to_sql_kwargs) )
else:
__snake_case , __snake_case : Union[str, Any] = len(self.dataset ), self.batch_size
with multiprocessing.Pool(self.num_proc ) as pool:
for num_rows in logging.tqdm(
pool.imap(
self._batch_sql , [(offset, index, to_sql_kwargs) for offset in range(0 , a_ , a_ )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating SQL from Arrow format''' , ):
written += num_rows
return written
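# Hedged usage sketch of the writer above through the public datasets API
# (the table name and SQLite URI are placeholders):
#
# from datasets import Dataset
# ds = Dataset.from_dict({"id": [1, 2], "text": ["a", "b"]})
# ds.to_sql("my_table", "sqlite:///my.db", batch_size=1000)  # backed by the writer above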
| 24
| 1
|
"""simple docstring"""
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def data_handling(data: dict) -> tuple:
    """simple docstring"""
    return (data["data"], data["target"])


def xgboost(features: np.ndarray, target: np.ndarray, test_features: np.ndarray) -> np.ndarray:
    """simple docstring"""
    xgb = XGBRegressor(verbosity=0, random_state=42)
    xgb.fit(features, target)
    # Predict target for test data
    predictions = xgb.predict(test_features)
    predictions = predictions.reshape(len(predictions), 1)
    return predictions


def main() -> None:
    """simple docstring"""
    # Load the California housing dataset
    california = fetch_california_housing()
    data, target = data_handling(california)
    x_train, x_test, y_train, y_test = train_test_split(
        data, target, test_size=0.25, random_state=1
    )
    predictions = xgboost(x_train, y_train, x_test)
    # Error printing
    print(f"""Mean Absolute Error : {mean_absolute_error(y_test, predictions)}""")
    print(f"""Mean Square Error : {mean_squared_error(y_test, predictions)}""")
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
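# Hedged sketch on synthetic data (avoids downloading the California housing
# dataset; a tree ensemble interpolates y = 2x inside the training range but
# does not extrapolate beyond it):
#
# rng = np.random.default_rng(0)
# x = rng.uniform(0, 10, (100, 1))
# y = 2 * x.ravel()
# print(xgboost(x[:80], y[:80], x[80:]).shape)  # (20, 1)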
| 24
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE : Optional[Any] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : Optional[int] = {
"""unc-nlp/lxmert-base-uncased""": """https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json""",
}
class _UpperCAmelCase ( __snake_case ):
'''simple docstring'''
lowerCamelCase__ ='lxmert'
lowerCamelCase__ ={}
def __init__(self , a_=3_05_22 , a_=7_68 , a_=12 , a_=95_00 , a_=16_00 , a_=4_00 , a_=30_72 , a_="gelu" , a_=0.1 , a_=0.1 , a_=5_12 , a_=2 , a_=0.02 , a_=1E-12 , a_=9 , a_=5 , a_=5 , a_=20_48 , a_=4 , a_=6.67 , a_=True , a_=True , a_=True , a_=True , a_=True , a_=True , a_=True , **a_ , ):
'''simple docstring'''
__snake_case : Optional[int] = vocab_size
__snake_case : List[str] = hidden_size
__snake_case : List[Any] = num_attention_heads
__snake_case : int = hidden_act
__snake_case : int = intermediate_size
__snake_case : Any = hidden_dropout_prob
__snake_case : List[Any] = attention_probs_dropout_prob
__snake_case : Tuple = max_position_embeddings
__snake_case : List[str] = type_vocab_size
__snake_case : str = initializer_range
__snake_case : Tuple = layer_norm_eps
__snake_case : List[Any] = num_qa_labels
__snake_case : int = num_object_labels
__snake_case : Optional[Any] = num_attr_labels
__snake_case : Union[str, Any] = l_layers
__snake_case : Optional[int] = x_layers
__snake_case : Optional[int] = r_layers
__snake_case : Tuple = visual_feat_dim
__snake_case : Optional[int] = visual_pos_dim
__snake_case : Dict = visual_loss_normalizer
__snake_case : str = task_matched
__snake_case : Optional[Any] = task_mask_lm
__snake_case : List[str] = task_obj_predict
__snake_case : Optional[Any] = task_qa
__snake_case : Any = visual_obj_loss
__snake_case : int = visual_attr_loss
__snake_case : List[Any] = visual_feat_loss
__snake_case : Optional[Any] = {'''vision''': r_layers, '''cross_encoder''': x_layers, '''language''': l_layers}
super().__init__(**a_ )
| 24
| 1
|
"""simple docstring"""
import os
import unittest
from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow
from ...test_tokenization_common import TokenizerTesterMixin
@require_jieba
class _UpperCAmelCase ( __snake_case, unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ =CpmAntTokenizer
lowerCamelCase__ =False
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
super().setUp()
__snake_case : Optional[int] = [
'''<d>''',
'''</d>''',
'''<s>''',
'''</s>''',
'''</_>''',
'''<unk>''',
'''<pad>''',
'''</n>''',
'''我''',
'''是''',
'''C''',
'''P''',
'''M''',
'''A''',
'''n''',
'''t''',
]
__snake_case : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
@tooslow
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : str = CpmAntTokenizer.from_pretrained('''openbmb/cpm-ant-10b''' )
__snake_case : Tuple = '''今天天气真好!'''
__snake_case : int = ['''今天''', '''天气''', '''真''', '''好''', '''!''']
__snake_case : int = tokenizer.tokenize(a_ )
self.assertListEqual(a_ , a_ )
__snake_case : List[Any] = '''今天天气真好!'''
__snake_case : Dict = [tokenizer.bos_token] + tokens
__snake_case : List[str] = [6, 98_02, 1_49_62, 20_82, 8_31, 2_44]
self.assertListEqual(tokenizer.convert_tokens_to_ids(a_ ) , a_ )
__snake_case : Optional[Any] = tokenizer.decode(a_ )
self.assertEqual(a_ , a_ )
| 24
|
"""simple docstring"""
def find_min(arr: list[int]) -> int:
    """simple docstring"""
    n = len(arr)
    s = sum(arr)
    # dp[i][j] is True when some subset of the first i items sums to j
    dp = [[False for x in range(s + 1)] for y in range(n + 1)]
    for i in range(n + 1):
        dp[i][0] = True
    for i in range(1, s + 1):
        dp[0][i] = False
    for i in range(1, n + 1):
        for j in range(1, s + 1):
            dp[i][j] = dp[i - 1][j]
            if arr[i - 1] <= j:
                dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]]
    # The best partition keeps the larger achievable subset sum j <= s / 2
    for j in range(int(s / 2), -1, -1):
        if dp[n][j] is True:
            diff = s - 2 * j
            break
    return diff
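if __name__ == "__main__":
    # Hedged example: [1, 6, 11, 5] splits into {1, 5, 6} and {11}
    # (subset sums 12 and 11), so the minimum difference printed is 1.
    print(find_min([1, 6, 11, 5]))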
| 24
| 1
|
"""simple docstring"""
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class _UpperCAmelCase :
'''simple docstring'''
def __init__(self , a_ = "cpu" , a_ = "openai/clip-vit-large-patch14" ):
'''simple docstring'''
__snake_case : Union[str, Any] = device
__snake_case : int = CLIPTokenizerFast.from_pretrained(a_ )
__snake_case : Any = [0.4814_5466, 0.457_8275, 0.4082_1073]
__snake_case : Optional[Any] = [0.2686_2954, 0.2613_0258, 0.2757_7711]
__snake_case : int = torchvision.transforms.Normalize(self.image_mean , self.image_std )
__snake_case : List[Any] = torchvision.transforms.Resize(2_24 )
__snake_case : str = torchvision.transforms.CenterCrop(2_24 )
def SCREAMING_SNAKE_CASE (self , a_ ):
'''simple docstring'''
__snake_case : Dict = self.resize(a_ )
__snake_case : Optional[int] = self.center_crop(a_ )
__snake_case : List[str] = self.normalize(a_ )
return images
def __call__(self , a_=None , a_=None , **a_ ):
'''simple docstring'''
__snake_case : str = self.tokenizer(text=a_ , **a_ )
__snake_case : Dict = self.preprocess_img(a_ )
__snake_case : Dict = {key: value.to(self.device ) for (key, value) in encoding.items()}
return encoding
class _UpperCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__(self , a_=10 , a_=0.01 , a_=None , a_=None , a_=None , a_=None , a_=None , a_=None , a_=False , a_=True , a_="image" , a_=True , a_=False , a_=False , a_=False , ):
'''simple docstring'''
super().__init__()
__snake_case : Union[str, Any] = None
__snake_case : List[Any] = device if device else get_device()
if vqgan:
__snake_case : List[str] = vqgan
else:
__snake_case : Optional[Any] = load_vqgan(self.device , conf_path=a_ , ckpt_path=a_ )
self.vqgan.eval()
if clip:
__snake_case : List[Any] = clip
else:
__snake_case : Optional[int] = CLIPModel.from_pretrained('''openai/clip-vit-base-patch32''' )
self.clip.to(self.device )
__snake_case : Optional[int] = ProcessorGradientFlow(device=self.device )
__snake_case : Optional[int] = iterations
__snake_case : Any = lr
__snake_case : Optional[Any] = log
__snake_case : int = make_grid
__snake_case : List[str] = return_val
__snake_case : int = quantize
__snake_case : Optional[Any] = self.vqgan.decoder.z_shape
def SCREAMING_SNAKE_CASE (self , a_=None , a_=None , a_=5 , a_=True ):
'''simple docstring'''
__snake_case : Union[str, Any] = []
if output_path is None:
__snake_case : str = '''./animation.gif'''
if input_path is None:
__snake_case : Any = self.save_path
__snake_case : Optional[Any] = sorted(glob(input_path + '''/*''' ) )
if not len(a_ ):
raise ValueError(
'''No images found in save path, aborting (did you pass save_intermediate=True to the generate'''
''' function?)''' )
if len(a_ ) == 1:
print('''Only one image found in save path, (did you pass save_intermediate=True to the generate function?)''' )
__snake_case : Optional[int] = total_duration / len(a_ )
__snake_case : int = [frame_duration] * len(a_ )
if extend_frames:
__snake_case : Dict = 1.5
__snake_case : List[Any] = 3
for file_name in paths:
if file_name.endswith('''.png''' ):
images.append(imageio.imread(a_ ) )
imageio.mimsave(a_ , a_ , duration=a_ )
print(f"""gif saved to {output_path}""" )
def SCREAMING_SNAKE_CASE (self , a_=None , a_=None ):
'''simple docstring'''
if not (path or img):
raise ValueError('''Input either path or tensor''' )
if img is not None:
raise NotImplementedError
__snake_case : Dict = preprocess(Image.open(a_ ) , target_image_size=2_56 ).to(self.device )
__snake_case : Tuple = preprocess_vqgan(a_ )
__snake_case , *__snake_case : Union[str, Any] = self.vqgan.encode(a_ )
return z
def SCREAMING_SNAKE_CASE (self , a_ ):
'''simple docstring'''
__snake_case : Any = self.latent.detach().requires_grad_()
__snake_case : str = base_latent + transform_vector
if self.quantize:
__snake_case , *__snake_case : str = self.vqgan.quantize(a_ )
else:
__snake_case : Tuple = trans_latent
return self.vqgan.decode(a_ )
def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_=None ):
'''simple docstring'''
__snake_case : List[str] = self.clip_preprocessor(text=a_ , images=a_ , return_tensors='''pt''' , padding=a_ )
__snake_case : Dict = self.clip(**a_ )
__snake_case : Optional[Any] = clip_outputs.logits_per_image
if weights is not None:
__snake_case : Dict = similarity_logits * weights
return similarity_logits.sum()
def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ ):
'''simple docstring'''
__snake_case : str = self._get_clip_similarity(pos_prompts['''prompts'''] , a_ , weights=(1 / pos_prompts['''weights''']) )
if neg_prompts:
__snake_case : Any = self._get_clip_similarity(neg_prompts['''prompts'''] , a_ , weights=neg_prompts['''weights'''] )
else:
__snake_case : Optional[Any] = torch.tensor([1] , device=self.device )
__snake_case : int = -torch.log(a_ ) + torch.log(a_ )
return loss
def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ ):
'''simple docstring'''
__snake_case : int = torch.randn_like(self.latent , requires_grad=a_ , device=self.device )
__snake_case : List[str] = torch.optim.Adam([vector] , lr=self.lr )
for i in range(self.iterations ):
optim.zero_grad()
__snake_case : Optional[int] = self._add_vector(a_ )
__snake_case : Tuple = loop_post_process(a_ )
__snake_case : Tuple = self._get_CLIP_loss(a_ , a_ , a_ )
print('''CLIP loss''' , a_ )
if self.log:
wandb.log({'''CLIP Loss''': clip_loss} )
clip_loss.backward(retain_graph=a_ )
optim.step()
if self.return_val == "image":
yield custom_to_pil(transformed_img[0] )
else:
yield vector
def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ ):
'''simple docstring'''
wandb.init(reinit=a_ , project='''face-editor''' )
wandb.config.update({'''Positive Prompts''': positive_prompts} )
wandb.config.update({'''Negative Prompts''': negative_prompts} )
wandb.config.update({'''lr''': self.lr, '''iterations''': self.iterations} )
if image_path:
__snake_case : Optional[int] = Image.open(a_ )
__snake_case : Any = image.resize((2_56, 2_56) )
wandb.log('''Original Image''' , wandb.Image(a_ ) )
def SCREAMING_SNAKE_CASE (self , a_ ):
'''simple docstring'''
if not prompts:
return []
__snake_case : Dict = []
__snake_case : List[Any] = []
if isinstance(a_ , a_ ):
__snake_case : List[str] = [prompt.strip() for prompt in prompts.split('''|''' )]
for prompt in prompts:
if isinstance(a_ , (tuple, list) ):
__snake_case : int = prompt[0]
__snake_case : str = float(prompt[1] )
elif ":" in prompt:
__snake_case , __snake_case : Any = prompt.split(''':''' )
__snake_case : str = float(a_ )
else:
__snake_case : Optional[int] = prompt
__snake_case : str = 1.0
processed_prompts.append(a_ )
weights.append(a_ )
return {
"prompts": processed_prompts,
"weights": torch.tensor(a_ , device=self.device ),
}
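    # Hedged illustration of the parsing above: process_prompts("a smiling
    # face:1.5|sad:-1") yields {"prompts": ["a smiling face", "sad"],
    # "weights": tensor([1.5, -1.0])}, following the "prompt:weight" syntax
    # with "|" as the separator.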
def SCREAMING_SNAKE_CASE (self , a_ , a_=None , a_=None , a_=True , a_=False , a_=True , a_=True , a_=None , ):
'''simple docstring'''
if image_path:
__snake_case : Optional[Any] = self._get_latent(a_ )
else:
__snake_case : Union[str, Any] = torch.randn(self.latent_dim , device=self.device )
if self.log:
self._init_logging(a_ , a_ , a_ )
assert pos_prompts, "You must provide at least one positive prompt."
__snake_case : Optional[int] = self.process_prompts(a_ )
__snake_case : Union[str, Any] = self.process_prompts(a_ )
if save_final and save_path is None:
__snake_case : Union[str, Any] = os.path.join('''./outputs/''' , '''_'''.join(pos_prompts['''prompts'''] ) )
if not os.path.exists(a_ ):
os.makedirs(a_ )
else:
__snake_case : List[Any] = save_path + '''_''' + get_timestamp()
os.makedirs(a_ )
__snake_case : List[str] = save_path
__snake_case : int = self.vqgan.decode(self.latent )[0]
if show_intermediate:
print('''Original Image''' )
show_pil(custom_to_pil(a_ ) )
__snake_case : Any = loop_post_process(a_ )
for iter, transformed_img in enumerate(self._optimize_CLIP(a_ , a_ , a_ ) ):
if show_intermediate:
show_pil(a_ )
if save_intermediate:
transformed_img.save(os.path.join(self.save_path , f"""iter_{iter:03d}.png""" ) )
if self.log:
wandb.log({'''Image''': wandb.Image(a_ )} )
if show_final:
show_pil(a_ )
if save_final:
transformed_img.save(os.path.join(self.save_path , f"""iter_{iter:03d}_final.png""" ) )
| 24
|
"""simple docstring"""
from collections.abc import Callable
def bisection(function: Callable[[float], float], a: float, b: float) -> float:
    """simple docstring"""
    start: float = a
    end: float = b
    if function(a) == 0:  # one of the a or b is a root for the function
        return a
    elif function(b) == 0:
        return b
    elif (
        function(a) * function(b) > 0
    ):  # if none of these are root and they are both positive or negative,
        # then this algorithm can't find the root
        raise ValueError('''could not find root in given interval.''')
    else:
        mid: float = start + (end - start) / 2.0
        while abs(start - mid) > 10**-7:  # until precisely equals to 10^-7
            if function(mid) == 0:
                return mid
            elif function(mid) * function(start) < 0:
                end = mid
            else:
                start = mid
            mid = start + (end - start) / 2.0
        return mid


def f(x: float) -> float:
    """simple docstring"""
    return x**3 - 2 * x - 5
if __name__ == "__main__":
print(bisection(f, 1, 1000))
import doctest
doctest.testmod()
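    # Hedged extra check: the same routine brackets the root of x**2 - 4 on
    # [0, 1000]; the printed value converges to 2.0 within the 1e-7 tolerance.
    print(bisection(lambda x: x**2 - 4, 0, 1000))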
| 24
| 1
|
"""simple docstring"""
def solution(numerator: int = 1, digit: int = 1_000) -> int:
    """simple docstring"""
    the_digit = 1
    longest_list_length = 0
    for divide_by_number in range(numerator, digit + 1):
        # Track the remainders seen so far; a repeat marks the cycle length
        has_been_divided: list[int] = []
        now_divide = numerator
        for _ in range(1, digit + 1):
            if now_divide in has_been_divided:
                if longest_list_length < len(has_been_divided):
                    longest_list_length = len(has_been_divided)
                    the_digit = divide_by_number
            else:
                has_been_divided.append(now_divide)
                now_divide = now_divide * 10 % divide_by_number
    return the_digit
# Tests
if __name__ == "__main__":
import doctest
doctest.testmod()
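    # Hedged example: among denominators up to 10, 1/7 has the longest
    # recurring cycle (0.142857..., six digits), so this prints 7.
    print(solution(1, 10))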
| 24
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_luke""": ["""LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LukeConfig"""],
"""tokenization_luke""": ["""LukeTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_luke"] = [
"""LUKE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""LukeForEntityClassification""",
"""LukeForEntityPairClassification""",
"""LukeForEntitySpanClassification""",
"""LukeForMultipleChoice""",
"""LukeForQuestionAnswering""",
"""LukeForSequenceClassification""",
"""LukeForTokenClassification""",
"""LukeForMaskedLM""",
"""LukeModel""",
"""LukePreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig
from .tokenization_luke import LukeTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_luke import (
LUKE_PRETRAINED_MODEL_ARCHIVE_LIST,
LukeForEntityClassification,
LukeForEntityPairClassification,
LukeForEntitySpanClassification,
LukeForMaskedLM,
LukeForMultipleChoice,
LukeForQuestionAnswering,
LukeForSequenceClassification,
LukeForTokenClassification,
LukeModel,
LukePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 24
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
SCREAMING_SNAKE_CASE : Dict = {"""configuration_vit_msn""": ["""VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ViTMSNConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit_msn"] = [
"""VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ViTMSNModel""",
"""ViTMSNForImageClassification""",
"""ViTMSNPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 24
|
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _UpperCAmelCase ( __snake_case ):
'''simple docstring'''
lowerCamelCase__ =['image_processor', 'tokenizer']
lowerCamelCase__ ='CLIPImageProcessor'
lowerCamelCase__ =('XLMRobertaTokenizer', 'XLMRobertaTokenizerFast')
def __init__(self , a_=None , a_=None , **a_ ):
'''simple docstring'''
__snake_case : Any = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , a_ , )
__snake_case : Union[str, Any] = kwargs.pop('''feature_extractor''' )
__snake_case : List[str] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(a_ , a_ )
def __call__(self , a_=None , a_=None , a_=None , **a_ ):
'''simple docstring'''
if text is None and images is None:
raise ValueError('''You have to specify either text or images. Both cannot be none.''' )
if text is not None:
__snake_case : Dict = self.tokenizer(a_ , return_tensors=a_ , **a_ )
if images is not None:
__snake_case : Optional[int] = self.image_processor(a_ , return_tensors=a_ , **a_ )
if text is not None and images is not None:
__snake_case : List[str] = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**a_ ) , tensor_type=a_ )
def SCREAMING_SNAKE_CASE (self , *a_ , **a_ ):
'''simple docstring'''
return self.tokenizer.batch_decode(*a_ , **a_ )
def SCREAMING_SNAKE_CASE (self , *a_ , **a_ ):
'''simple docstring'''
return self.tokenizer.decode(*a_ , **a_ )
@property
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : int = self.tokenizer.model_input_names
__snake_case : Union[str, Any] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
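# Hedged usage sketch of the processor above (the CLIP image processor plus
# XLM-RoBERTa tokenizer pairing matches AltCLIPProcessor in transformers;
# the checkpoint handle and file name below are placeholders):
#
# from PIL import Image
# processor = AltCLIPProcessor.from_pretrained("some/altclip-checkpoint")
# batch = processor(text=["a photo of a cat"], images=Image.open("cat.png"),
#                   return_tensors="pt", padding=True)
# # batch carries input_ids, attention_mask and pixel_values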
| 24
| 1
|
"""simple docstring"""
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
    require_detectron2,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
class _UpperCAmelCase :
'''simple docstring'''
@staticmethod
def SCREAMING_SNAKE_CASE (*a_ , **a_ ):
'''simple docstring'''
pass
def lowercase ( _snake_case : Tuple ) ->Union[str, Any]:
"""simple docstring"""
return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
SCREAMING_SNAKE_CASE : Union[str, Any] = (
"""https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png"""
)
@is_pipeline_test
@require_torch
@require_vision
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ =MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ ):
'''simple docstring'''
__snake_case : Union[str, Any] = pipeline(
'''document-question-answering''' , model=a_ , tokenizer=a_ , image_processor=a_ )
__snake_case : Any = INVOICE_URL
__snake_case : List[str] = list(zip(*apply_tesseract(load_image(a_ ) , a_ , '''''' ) ) )
__snake_case : Any = '''What is the placebo?'''
__snake_case : Any = [
{
'''image''': load_image(a_ ),
'''question''': question,
},
{
'''image''': image,
'''question''': question,
},
{
'''image''': image,
'''question''': question,
'''word_boxes''': word_boxes,
},
]
return dqa_pipeline, examples
def SCREAMING_SNAKE_CASE (self , a_ , a_ ):
'''simple docstring'''
__snake_case : Optional[Any] = dqa_pipeline(a_ , top_k=2 )
self.assertEqual(
a_ , [
[
{'''score''': ANY(a_ ), '''answer''': ANY(a_ ), '''start''': ANY(a_ ), '''end''': ANY(a_ )},
{'''score''': ANY(a_ ), '''answer''': ANY(a_ ), '''start''': ANY(a_ ), '''end''': ANY(a_ )},
]
]
* 3 , )
@require_torch
    @require_detectron2
@require_pytesseract
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : str = pipeline('''document-question-answering''' , model='''hf-internal-testing/tiny-random-layoutlmv2''' )
__snake_case : Union[str, Any] = INVOICE_URL
__snake_case : Tuple = '''How many cats are there?'''
__snake_case : Tuple = [
{'''score''': 0.0001, '''answer''': '''oy 2312/2019''', '''start''': 38, '''end''': 39},
{'''score''': 0.0001, '''answer''': '''oy 2312/2019 DUE''', '''start''': 38, '''end''': 40},
]
__snake_case : Union[str, Any] = dqa_pipeline(image=a_ , question=a_ , top_k=2 )
self.assertEqual(nested_simplify(a_ , decimals=4 ) , a_ )
__snake_case : int = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(nested_simplify(a_ , decimals=4 ) , a_ )
        # No text is detected in this image, so layoutlmv2 should fail and
        # return an empty answer.
__snake_case : List[Any] = '''./tests/fixtures/tests_samples/COCO/000000039769.png'''
__snake_case : List[Any] = dqa_pipeline(image=a_ , question=a_ , top_k=2 )
self.assertEqual(a_ , [] )
        # We can optionally pass the words and bounding boxes directly
__snake_case : Dict = '''./tests/fixtures/tests_samples/COCO/000000039769.png'''
__snake_case : Union[str, Any] = []
__snake_case : int = []
__snake_case : List[str] = dqa_pipeline(image=a_ , question=a_ , words=a_ , boxes=a_ , top_k=2 )
self.assertEqual(a_ , [] )
@slow
@require_torch
    @require_detectron2
@require_pytesseract
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : List[Any] = pipeline(
'''document-question-answering''' , model='''tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa''' , revision='''9977165''' , )
__snake_case : str = INVOICE_URL
__snake_case : Any = '''What is the invoice number?'''
__snake_case : Union[str, Any] = dqa_pipeline(image=a_ , question=a_ , top_k=2 )
self.assertEqual(
nested_simplify(a_ , decimals=4 ) , [
{'''score''': 0.9944, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0009, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
__snake_case : Union[str, Any] = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(a_ , decimals=4 ) , [
{'''score''': 0.9944, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0009, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
__snake_case : Dict = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(a_ , decimals=4 ) , [
[
{'''score''': 0.9944, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0009, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
],
]
* 2 , )
@slow
@require_torch
    @require_detectron2
@require_pytesseract
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[int] = pipeline(
'''document-question-answering''' , model='''tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa''' , revision='''9977165''' , max_seq_len=50 , )
__snake_case : str = INVOICE_URL
__snake_case : List[str] = '''What is the invoice number?'''
__snake_case : Tuple = dqa_pipeline(image=a_ , question=a_ , top_k=2 )
self.assertEqual(
nested_simplify(a_ , decimals=4 ) , [
{'''score''': 0.9974, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
{'''score''': 0.9948, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
__snake_case : str = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(a_ , decimals=4 ) , [
{'''score''': 0.9974, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
{'''score''': 0.9948, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
__snake_case : Optional[int] = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(a_ , decimals=4 ) , [
[
{'''score''': 0.9974, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
{'''score''': 0.9948, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
]
]
* 2 , )
@slow
@require_torch
@require_pytesseract
@require_vision
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Tuple = AutoTokenizer.from_pretrained(
'''impira/layoutlm-document-qa''' , revision='''3dc6de3''' , add_prefix_space=a_ )
__snake_case : int = pipeline(
'''document-question-answering''' , model='''impira/layoutlm-document-qa''' , tokenizer=a_ , revision='''3dc6de3''' , )
__snake_case : int = INVOICE_URL
__snake_case : List[str] = '''What is the invoice number?'''
__snake_case : Union[str, Any] = dqa_pipeline(image=a_ , question=a_ , top_k=2 )
self.assertEqual(
nested_simplify(a_ , decimals=4 ) , [
{'''score''': 0.4251, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0819, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
] , )
__snake_case : Tuple = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(a_ , decimals=4 ) , [
{'''score''': 0.4251, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0819, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
] , )
__snake_case : Tuple = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(a_ , decimals=4 ) , [
[
{'''score''': 0.4251, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0819, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
]
]
* 2 , )
__snake_case : Union[str, Any] = list(zip(*apply_tesseract(load_image(a_ ) , a_ , '''''' ) ) )
# This model should also work if `image` is set to None
__snake_case : Dict = dqa_pipeline({'''image''': None, '''word_boxes''': word_boxes, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(a_ , decimals=4 ) , [
{'''score''': 0.4251, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0819, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
] , )
@slow
@require_torch
@require_pytesseract
@require_vision
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : int = AutoTokenizer.from_pretrained(
'''impira/layoutlm-document-qa''' , revision='''3dc6de3''' , add_prefix_space=a_ )
__snake_case : int = pipeline(
'''document-question-answering''' , model='''impira/layoutlm-document-qa''' , tokenizer=a_ , revision='''3dc6de3''' , max_seq_len=50 , )
__snake_case : int = INVOICE_URL
__snake_case : Optional[int] = '''What is the invoice number?'''
__snake_case : str = dqa_pipeline(image=a_ , question=a_ , top_k=2 )
self.assertEqual(
nested_simplify(a_ , decimals=4 ) , [
{'''score''': 0.9999, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.9998, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
__snake_case : List[Any] = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(a_ , decimals=4 ) , [
[
{'''score''': 0.9999, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.9998, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
]
]
* 2 , )
__snake_case : Union[str, Any] = list(zip(*apply_tesseract(load_image(a_ ) , a_ , '''''' ) ) )
# This model should also work if `image` is set to None
__snake_case : Any = dqa_pipeline({'''image''': None, '''word_boxes''': word_boxes, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(a_ , decimals=4 ) , [
{'''score''': 0.9999, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.9998, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
@slow
@require_torch
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : List[str] = pipeline(
'''document-question-answering''' , model='''naver-clova-ix/donut-base-finetuned-docvqa''' , tokenizer=AutoTokenizer.from_pretrained('''naver-clova-ix/donut-base-finetuned-docvqa''' ) , feature_extractor='''naver-clova-ix/donut-base-finetuned-docvqa''' , )
__snake_case : int = INVOICE_URL
__snake_case : Optional[Any] = '''What is the invoice number?'''
__snake_case : Optional[int] = dqa_pipeline(image=a_ , question=a_ , top_k=2 )
self.assertEqual(nested_simplify(a_ , decimals=4 ) , [{'''answer''': '''us-001'''}] )
@require_tf
@unittest.skip('''Document question answering not implemented in TF''' )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
pass
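# Hedged usage sketch mirroring the slow tests above (the model handle comes
# from those tests; OCR requires pytesseract):
#
# from transformers import pipeline
# dqa = pipeline("document-question-answering", model="impira/layoutlm-document-qa")
# print(dqa(image=INVOICE_URL, question="What is the invoice number?", top_k=1))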
| 24
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
SCREAMING_SNAKE_CASE : Optional[int] = None
SCREAMING_SNAKE_CASE : Any = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : int = {"""vocab_file""": """sentencepiece.bpe.model""", """tokenizer_file""": """tokenizer.json"""}
SCREAMING_SNAKE_CASE : List[Any] = {
"""vocab_file""": {
"""facebook/mbart-large-en-ro""": (
"""https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"""
),
"""facebook/mbart-large-cc25""": (
"""https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"""
),
},
"""tokenizer_file""": {
"""facebook/mbart-large-en-ro""": """https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json""",
"""facebook/mbart-large-cc25""": """https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json""",
},
}
SCREAMING_SNAKE_CASE : Tuple = {
"""facebook/mbart-large-en-ro""": 1024,
"""facebook/mbart-large-cc25""": 1024,
}
# fmt: off
SCREAMING_SNAKE_CASE : List[Any] = ["""ar_AR""", """cs_CZ""", """de_DE""", """en_XX""", """es_XX""", """et_EE""", """fi_FI""", """fr_XX""", """gu_IN""", """hi_IN""", """it_IT""", """ja_XX""", """kk_KZ""", """ko_KR""", """lt_LT""", """lv_LV""", """my_MM""", """ne_NP""", """nl_XX""", """ro_RO""", """ru_RU""", """si_LK""", """tr_TR""", """vi_VN""", """zh_CN"""]
class _UpperCAmelCase ( __snake_case ):
'''simple docstring'''
lowerCamelCase__ =VOCAB_FILES_NAMES
lowerCamelCase__ =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase__ =PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase__ =['input_ids', 'attention_mask']
lowerCamelCase__ =MBartTokenizer
lowerCamelCase__ =[]
lowerCamelCase__ =[]
def __init__(self , a_=None , a_=None , a_="<s>" , a_="</s>" , a_="</s>" , a_="<s>" , a_="<unk>" , a_="<pad>" , a_="<mask>" , a_=None , a_=None , a_=None , **a_ , ):
'''simple docstring'''
__snake_case : Optional[int] = AddedToken(a_ , lstrip=a_ , rstrip=a_ ) if isinstance(a_ , a_ ) else mask_token
super().__init__(
vocab_file=a_ , tokenizer_file=a_ , bos_token=a_ , eos_token=a_ , sep_token=a_ , cls_token=a_ , unk_token=a_ , pad_token=a_ , mask_token=a_ , src_lang=a_ , tgt_lang=a_ , additional_special_tokens=a_ , **a_ , )
__snake_case : Tuple = vocab_file
__snake_case : Optional[Any] = False if not self.vocab_file else True
__snake_case : Dict = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({'''additional_special_tokens''': _additional_special_tokens} )
__snake_case : Optional[int] = {
lang_code: self.convert_tokens_to_ids(a_ ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
__snake_case : List[Any] = src_lang if src_lang is not None else '''en_XX'''
__snake_case : Any = self.convert_tokens_to_ids(self._src_lang )
__snake_case : Dict = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return self._src_lang
@src_lang.setter
def SCREAMING_SNAKE_CASE (self , a_ ):
'''simple docstring'''
__snake_case : Tuple = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def SCREAMING_SNAKE_CASE (self , a_ , a_ = None ):
'''simple docstring'''
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def SCREAMING_SNAKE_CASE (self , a_ , a_ = None ):
'''simple docstring'''
__snake_case : Tuple = [self.sep_token_id]
__snake_case : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , **a_ ):
'''simple docstring'''
if src_lang is None or tgt_lang is None:
raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
__snake_case : Optional[int] = src_lang
__snake_case : Tuple = self(a_ , add_special_tokens=a_ , return_tensors=a_ , **a_ )
__snake_case : Union[str, Any] = self.convert_tokens_to_ids(a_ )
__snake_case : int = tgt_lang_id
return inputs
def SCREAMING_SNAKE_CASE (self , a_ , a_ = "en_XX" , a_ = None , a_ = "ro_RO" , **a_ , ):
'''simple docstring'''
__snake_case : int = src_lang
__snake_case : List[Any] = tgt_lang
        return super().prepare_seq2seq_batch(a_ , a_ , **a_ )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return self.set_src_lang_special_tokens(self.src_lang )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def SCREAMING_SNAKE_CASE (self , a_ ):
'''simple docstring'''
__snake_case : int = self.convert_tokens_to_ids(a_ )
__snake_case : List[Any] = []
__snake_case : Any = [self.eos_token_id, self.cur_lang_code]
__snake_case : List[str] = self.convert_ids_to_tokens(self.prefix_tokens )
__snake_case : Dict = self.convert_ids_to_tokens(self.suffix_tokens )
__snake_case : Any = processors.TemplateProcessing(
single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def SCREAMING_SNAKE_CASE (self , a_ ):
'''simple docstring'''
__snake_case : int = self.convert_tokens_to_ids(a_ )
__snake_case : Optional[Any] = []
__snake_case : Dict = [self.eos_token_id, self.cur_lang_code]
__snake_case : str = self.convert_ids_to_tokens(self.prefix_tokens )
__snake_case : Any = self.convert_ids_to_tokens(self.suffix_tokens )
__snake_case : Tuple = processors.TemplateProcessing(
single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def SCREAMING_SNAKE_CASE (self , a_ , a_ = None ):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(a_ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory.""" )
return
__snake_case : Optional[Any] = os.path.join(
a_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(a_ ):
copyfile(self.vocab_file , a_ )
return (out_vocab_file,)
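# Hedged usage sketch of the language-code handling above (the checkpoint
# name comes from the pretrained map; exact ids depend on the vocabulary):
#
# tok = MBartTokenizerFast.from_pretrained(
#     "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO"
# )
# batch = tok("UN Chief says there is no military solution in Syria",
#             return_tensors="pt")
# # per set_src_lang_special_tokens, input_ids end with [eos_token_id, en_XX id]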
| 24
| 1
|
"""simple docstring"""
import numpy as np
from cv2 import destroyAllWindows, imread, imshow, waitKey
class NearestNeighbour:
    '''simple docstring'''

    def __init__(self, img, dst_width: int, dst_height: int):
        '''simple docstring'''
        if dst_width <= 0 or dst_height <= 0:
            raise ValueError('''Destination width/height should be > 0''')
        self.img = img
        self.src_w = img.shape[1]
        self.src_h = img.shape[0]
        self.dst_w = dst_width
        self.dst_h = dst_height
        self.ratio_x = self.src_w / self.dst_w
        self.ratio_y = self.src_h / self.dst_h
        self.output = (
            np.ones((self.dst_h, self.dst_w, 3), np.uint8) * 255
        )

    def process(self):
        '''simple docstring'''
        for i in range(self.dst_h):
            for j in range(self.dst_w):
                self.output[i][j] = self.img[self.get_y(i)][self.get_x(j)]

    def get_x(self, x: int) -> int:
        '''simple docstring'''
        return int(self.ratio_x * x)

    def get_y(self, y: int) -> int:
        '''simple docstring'''
        return int(self.ratio_y * y)
if __name__ == "__main__":
    dst_w, dst_h = 800, 600
    im = imread("""image_data/lena.jpg""", 1)
    n = NearestNeighbour(im, dst_w, dst_h)
n.process()
imshow(
F'Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}', n.output
)
waitKey(0)
destroyAllWindows()
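# Hedged alternative when image_data/lena.jpg is unavailable: a synthetic
# frame exercises the same resize path.
#
# im = np.random.randint(0, 256, (600, 800, 3), dtype=np.uint8)
# resizer = NearestNeighbour(im, 400, 300)
# resizer.process()
# print(resizer.output.shape)  # (300, 400, 3)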
| 24
|
"""simple docstring"""
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE : Union[str, Any] = logging.getLogger(__name__)
@dataclass(frozen=__snake_case )
class _UpperCAmelCase :
'''simple docstring'''
lowerCamelCase__ =42
lowerCamelCase__ =42
lowerCamelCase__ =None
lowerCamelCase__ =None
lowerCamelCase__ =None
@dataclass(frozen=__snake_case )
class _UpperCAmelCase :
'''simple docstring'''
lowerCamelCase__ =42
lowerCamelCase__ =None
lowerCamelCase__ =None
lowerCamelCase__ =None
lowerCamelCase__ =None
if is_torch_available():
import torch
from torch.utils.data import Dataset
class _UpperCAmelCase ( __snake_case ):
'''simple docstring'''
lowerCamelCase__ =42
def __init__(self , a_ , a_ , a_ , a_ = None , a_=False , a_ = False , ):
'''simple docstring'''
__snake_case : Any = hans_processors[task]()
__snake_case : int = os.path.join(
a_ , '''cached_{}_{}_{}_{}'''.format(
'''dev''' if evaluate else '''train''' , tokenizer.__class__.__name__ , str(a_ ) , a_ , ) , )
__snake_case : Tuple = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
__snake_case , __snake_case : Dict = label_list[2], label_list[1]
__snake_case : Any = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
__snake_case : int = cached_features_file + '''.lock'''
with FileLock(a_ ):
if os.path.exists(a_ ) and not overwrite_cache:
logger.info(f"""Loading features from cached file {cached_features_file}""" )
__snake_case : Union[str, Any] = torch.load(a_ )
else:
logger.info(f"""Creating features from dataset file at {data_dir}""" )
__snake_case : Dict = (
processor.get_dev_examples(a_ ) if evaluate else processor.get_train_examples(a_ )
)
logger.info('''Training examples: %s''' , len(a_ ) )
__snake_case : Optional[int] = hans_convert_examples_to_features(a_ , a_ , a_ , a_ )
logger.info('''Saving features into cached file %s''' , a_ )
torch.save(self.features , a_ )
def __len__(self ):
'''simple docstring'''
return len(self.features )
def __getitem__(self , a_ ):
'''simple docstring'''
return self.features[i]
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return self.label_list
if is_tf_available():
    import tensorflow as tf

    class TFHansDataset:
        """TensorFlow dataset of HANS features, built through a generator."""

        features: List[InputFeatures]

        def __init__(
            self,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            task: str,
            max_seq_length: Optional[int] = 128,
            overwrite_cache=False,
            evaluate: bool = False,
        ):
            processor = hans_processors[task]()
            label_list = processor.get_labels()
            if tokenizer.__class__ in (
                RobertaTokenizer,
                RobertaTokenizerFast,
                XLMRobertaTokenizer,
                BartTokenizer,
                BartTokenizerFast,
            ):
                # HACK(label indices are swapped in RoBERTa pretrained model)
                label_list[1], label_list[2] = label_list[2], label_list[1]
            self.label_list = label_list

            examples = processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
            self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)

            def gen():
                for ex_index, ex in tqdm.tqdm(enumerate(self.features), desc="convert examples to features"):
                    if ex_index % 10000 == 0:
                        logger.info("Writing example %d of %d" % (ex_index, len(examples)))

                    yield (
                        {
                            "example_id": 0,
                            "input_ids": ex.input_ids,
                            "attention_mask": ex.attention_mask,
                            "token_type_ids": ex.token_type_ids,
                        },
                        ex.label,
                    )

            # NOTE: tf.int32 for the inputs and tf.int64 for the label is an
            # assumption made while repairing the garbled dtype names here.
            self.dataset = tf.data.Dataset.from_generator(
                gen,
                (
                    {
                        "example_id": tf.int32,
                        "input_ids": tf.int32,
                        "attention_mask": tf.int32,
                        "token_type_ids": tf.int32,
                    },
                    tf.int64,
                ),
                (
                    {
                        "example_id": tf.TensorShape([]),
                        "input_ids": tf.TensorShape([None, None]),
                        "attention_mask": tf.TensorShape([None, None]),
                        "token_type_ids": tf.TensorShape([None, None]),
                    },
                    tf.TensorShape([]),
                ),
            )

        def get_dataset(self):
            return self.dataset

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]

        def get_labels(self):
            return self.label_list
class HansProcessor(DataProcessor):
    """Processor for the HANS data set."""

    def get_train_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_train_set.txt")), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_evaluation_set.txt")), "dev")

    def get_labels(self):
        """See base class."""
        return ["contradiction", "entailment", "neutral"]

    def _create_examples(self, lines, set_type):
        """Creates examples for the training and dev sets."""
        examples = []
        for i, line in enumerate(lines):
            if i == 0:
                continue
            guid = "%s-%s" % (set_type, line[0])
            text_a = line[5]
            text_b = line[6]
            pairID = line[7][2:] if line[7].startswith("ex") else line[7]
            label = line[0]
            examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label, pairID=pairID))
        return examples
def hans_convert_examples_to_features(
    examples: List[InputExample], label_list: List[str], max_length: int, tokenizer: PreTrainedTokenizer
) -> List[InputFeatures]:
    """Loads a data file into a list of ``InputFeatures``."""
    label_map = {label: i for i, label in enumerate(label_list)}

    features = []
    for ex_index, example in tqdm.tqdm(enumerate(examples), desc="convert examples to features"):
        if ex_index % 10000 == 0:
            logger.info("Writing example %d" % (ex_index))
        inputs = tokenizer(
            example.text_a,
            example.text_b,
            add_special_tokens=True,
            max_length=max_length,
            padding="max_length",
            truncation=True,
            return_overflowing_tokens=True,
        )
        label = label_map[example.label] if example.label in label_map else 0
        pairID = int(example.pairID)
        features.append(InputFeatures(**inputs, label=label, pairID=pairID))

    for i, example in enumerate(examples[:5]):
        logger.info("*** Example ***")
        logger.info(f"guid: {example}")
        logger.info(f"features: {features[i]}")

    return features
hans_tasks_num_labels = {
    "hans": 3,
}

hans_processors = {
    "hans": HansProcessor,
}
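# --- Usage sketch (added; not part of the original file) ---------------------
# A minimal illustration of how the pieces above fit together. The data_dir
# layout and the checkpoint name are assumptions, not from the source.
#
#     from transformers import AutoTokenizer
#
#     tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
#     dataset = HansDataset(
#         data_dir="./hans",  # expects heuristics_{train,evaluation}_set.txt
#         tokenizer=tokenizer,
#         task="hans",
#         max_seq_length=128,
#         evaluate=True,
#     )
#     print(len(dataset), dataset.get_labels())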
| 24
| 1
|
"""simple docstring"""
from __future__ import annotations
def generate_all_permutations(sequence: list[int | str]) -> None:
    create_state_space_tree(sequence, [], 0, [0 for i in range(len(sequence))])


def create_state_space_tree(
    sequence: list[int | str],
    current_sequence: list[int | str],
    index: int,
    index_used: list[int],
) -> None:
    """Recursively build the state-space tree, printing each complete permutation."""
    if index == len(sequence):
        print(current_sequence)
        return

    for i in range(len(sequence)):
        if not index_used[i]:
            current_sequence.append(sequence[i])
            index_used[i] = True
            create_state_space_tree(sequence, current_sequence, index + 1, index_used)
            current_sequence.pop()
            index_used[i] = False


sequence: list[int | str] = [3, 1, 2, 4]
generate_all_permutations(sequence)

sequence_a: list[int | str] = ["A", "B", "C"]
generate_all_permutations(sequence_a)
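# Note (added): for sequence = [3, 1, 2, 4] the call above prints all 4! = 24
# orderings; the backtracking tries indices in increasing order, so the first
# line printed is [3, 1, 2, 4] and the last is [4, 2, 1, 3].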
| 24
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPTSAN_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "tanreinama/GPTSAN-2.8B-spout_is_uniform": (
        "https://huggingface.co/tanreinama/GPTSAN-2.8B-spout_is_uniform/resolve/main/config.json"
    ),
}
class GPTSanJapaneseConfig(PretrainedConfig):
    model_type = "gptsan-japanese"
    keys_to_ignore_at_inference = [
        "past_key_values",
    ]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        vocab_size=36000,
        max_position_embeddings=1280,
        d_model=1024,
        d_ff=8192,
        d_ext=4096,
        d_spout=128,
        num_switch_layers=10,
        num_ext_layers=0,
        num_heads=16,
        num_experts=16,
        expert_capacity=128,
        dropout_rate=0.0,
        layer_norm_epsilon=1e-5,
        router_bias=False,
        router_jitter_noise=0.0,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        output_hidden_states=False,
        output_attentions=False,
        initializer_factor=0.002,
        output_router_logits=False,
        use_cache=True,
        separator_token_id=35998,
        pad_token_id=35995,
        eos_token_id=35999,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.d_ff = d_ff
        self.d_ext = d_ext
        self.d_spout = d_spout
        self.num_switch_layers = num_switch_layers
        self.num_ext_layers = num_ext_layers
        self.num_layers = num_switch_layers + num_ext_layers
        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.output_hidden_states = output_hidden_states
        self.output_attentions = output_attentions
        self.initializer_factor = initializer_factor
        self.output_router_logits = output_router_logits
        self.use_cache = use_cache

        super().__init__(
            separator_token_id=separator_token_id,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
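# Usage sketch (added; illustrative values, not from the original file):
# overriding a couple of sizes and checking the derived layer count.
#
#     config = GPTSanJapaneseConfig(d_model=512, num_switch_layers=2)
#     assert config.num_layers == 2  # num_switch_layers + num_ext_layers (0 by default)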
| 24
| 1
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
    from .tokenization_rembert import RemBertTokenizer
else:
    RemBertTokenizer = None

logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model",
    },
    "tokenizer_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/rembert": 256,
}

SPIECE_UNDERLINE = "▁"
class RemBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RemBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        remove_space=True,
        keep_accents=False,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
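# Illustration of the layouts built above (added for clarity; X / Y stand for
# the ids of the first and second sequences):
#   single sequence: [CLS] X [SEP]            token_type_ids: 0 0 ... 0
#   sequence pair:   [CLS] X [SEP] Y [SEP]    token_type_ids: 0 ... 0 1 ... 1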
| 24
|
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
    Wav2Vec2Config,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForCTC,
    Wav2Vec2ForPreTraining,
    Wav2Vec2Processor,
    logging,
)
from transformers.models.wav2vec2.modeling_wav2vec2 import Wav2Vec2ForSequenceClassification
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "adapter_layer": "encoder.layers.*.adapter_layer",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "lm_head",
    "mask_emb": "masked_spec_embed",
    "pooling_layer.linear": "projector",
    "pooling_layer.projection": "classifier",
}
TOP_LEVEL_KEYS = [
    "lm_head",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
    "projector",
    "classifier",
]
def read_txt_into_dict(filename):
    result = {}
    with open(filename, "r") as file:
        for line_number, line in enumerate(file):
            line = line.strip()
            if line:
                words = line.split()
                key = line_number
                value = words[0]
                result[key] = value
    return result
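# Illustration (added): a label file containing the lines "happy" and "sad"
# maps to {0: "happy", 1: "sad"}; line numbers become class ids and the first
# whitespace-separated token on each line becomes the label name.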
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split(".")[-1]]
            weight_type = "param"

    if weight_type is not None and weight_type != "param":
        hf_shape = getattr(hf_pointer, weight_type).shape
    elif weight_type is not None and weight_type == "param":
        shape_pointer = hf_pointer
        for attribute in hf_param_name.split("."):
            shape_pointer = getattr(shape_pointer, attribute)
        hf_shape = shape_pointer.shape

        # let's reduce dimension
        value = value[0]
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "param":
        for attribute in hf_param_name.split("."):
            hf_pointer = getattr(hf_pointer, attribute)
        hf_pointer.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def rename_dict(key, value, full_name, weight_type, hf_dict):
    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split(".")[-1]]
            weight_type = "param"

    if weight_type is not None and weight_type != "param":
        full_key = ".".join([key, weight_type])
    elif weight_type is not None and weight_type == "param":
        full_key = ".".join([key, hf_param_name])
    else:
        full_key = key

    hf_dict[full_key] = value if "lm_head" in full_key else value[0]
PARAM_MAPPING = {
    "W_a": "linear_1.weight",
    "W_b": "linear_2.weight",
    "b_a": "linear_1.bias",
    "b_b": "linear_2.bias",
    "ln_W": "norm.weight",
    "ln_b": "norm.bias",
}
def load_wav2vec2_layer(name, value, hf_model=None, hf_dict=None):
    is_used = False
    for key, mapped_key in MAPPING.items():
        mapped_key = "wav2vec2." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
        if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
            is_used = True
            if "*" in mapped_key:
                layer_index = name.split(key)[0].split(".")[-2]
                mapped_key = mapped_key.replace("*", layer_index)
            if "weight_g" in name:
                weight_type = "weight_g"
            elif "weight_v" in name:
                weight_type = "weight_v"
            elif "bias" in name:
                weight_type = "bias"
            elif "weight" in name:
                # TODO: don't match quantizer.weight_proj
                weight_type = "weight"
            else:
                weight_type = None
            if hf_dict is not None:
                rename_dict(mapped_key, value, name, weight_type, hf_dict)
            else:
                set_recursively(hf_model, mapped_key, value, name, weight_type)
            return is_used
    return is_used
def recursively_load_weights(fairseq_model, hf_model, is_headless):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.wav2vec2.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            is_used = load_wav2vec2_layer(name, value, hf_model)
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wav2vec2_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True, is_seq_class=False
):
    """
    Copy/paste/tweak model's weights to transformers design.
    """
    if config_path is not None:
        config = Wav2Vec2Config.from_pretrained(config_path)
    else:
        config = Wav2Vec2Config()

    if is_seq_class:
        id2label = read_txt_into_dict(dict_path)
        config.id2label = id2label
        hf_wav2vec = Wav2Vec2ForSequenceClassification(config)
        feature_extractor = Wav2Vec2FeatureExtractor(
            feature_size=1,
            sampling_rate=16_000,
            padding_value=0,
            do_normalize=True,
            return_attention_mask=True,
        )
        feature_extractor.save_pretrained(pytorch_dump_folder_path)
    elif is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16_000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wav2vec = Wav2Vec2ForCTC(config)
    else:
        hf_wav2vec = Wav2Vec2ForPreTraining(config)

    if is_finetuned or is_seq_class:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        task_arg = argparse.Namespace(task="audio_pretraining")
        task = fairseq.tasks.setup_task(task_arg)

        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)

    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec, not is_finetuned)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
SCREAMING_SNAKE_CASE : Optional[Any] = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
parser.add_argument(
"""--is_seq_class""",
action="""store_true""",
help="""Whether the model to convert is a fine-tuned sequence classification model or not""",
)
SCREAMING_SNAKE_CASE : Any = parser.parse_args()
SCREAMING_SNAKE_CASE : Tuple = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
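# Example invocation (added; the file name and paths are hypothetical):
#
#     python convert_wav2vec2_checkpoint.py \
#         --checkpoint_path ./wav2vec_small.pt \
#         --pytorch_dump_folder_path ./wav2vec2-base \
#         --not_finetuned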
| 24
| 1
|
"""simple docstring"""
def count_divisors(n: int) -> int:
    n_divisors = 1
    i = 2

    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        n_divisors *= multiplicity + 1
        i += 1

    if n > 1:
        n_divisors *= 2

    return n_divisors


def solution() -> int:
    t_num = 1
    i = 1

    while True:
        i += 1
        t_num += i

        if count_divisors(t_num) > 500:
            break

    return t_num
if __name__ == "__main__":
print(solution())
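# Worked example (added): 28 = 2**2 * 7, so count_divisors(28) returns
# (2 + 1) * (1 + 1) = 6, counting the divisors 1, 2, 4, 7, 14 and 28. 28 is the
# first triangular number with more than five divisors; solution() applies the
# same multiplicative rule until the count exceeds 500.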
| 24
|
"""simple docstring"""
from ..utils import DummyObject, requires_backends
# NOTE: the original class name was stripped from this sample;
# SpectrogramDiffusionPipeline is the diffusers object guarded by these three
# backends and is used here as the most likely reconstruction.
class SpectrogramDiffusionPipeline(metaclass=DummyObject):
    _backends = ["transformers", "torch", "note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])
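# Illustration (assumed behavior, added): with any of the three backends
# missing, instantiation raises an ImportError naming the required packages.
#
#     SpectrogramDiffusionPipeline()  # -> ImportError: ... transformers, torch, note_seq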
| 24
| 1
|